improvement: federation get_keys and optimize signingkey storage

- get encryption keys over federation
- optimize signing key storage
- rate limit parsing of bad events
- rate limit signature fetching
- dependency bumps
Timo Kösters 2021-05-20 23:46:52 +02:00
parent ae41bc5067
commit 09157b2096
18 changed files with 566 additions and 371 deletions
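The two rate-limiting bullets above are implemented with one recurring exponential-backoff pattern in this diff: a map from the failing key (an event ID, or the list of signature IDs) to the time of the last failure and a retry counter, with further attempts refused until 30s · tries² has elapsed, capped at one day. A minimal standalone sketch of that check, with a plain String key standing in for Conduit's EventId / Vec<String> keys and the helper names chosen here only for illustration:

```rust
use std::collections::BTreeMap;
use std::time::{Duration, Instant};

/// (time of last failed try, number of failed tries), as in the diff's RateLimitState.
type RateLimitState = (Instant, u32);

/// Returns true if `key` is still inside its backoff window and should be skipped.
fn should_back_off(limiter: &BTreeMap<String, RateLimitState>, key: &str) -> bool {
    if let Some((time, tries)) = limiter.get(key) {
        // Exponential backoff: 30s * tries^2, capped at 24 hours.
        let mut min_elapsed = Duration::from_secs(30) * *tries * *tries;
        if min_elapsed > Duration::from_secs(60 * 60 * 24) {
            min_elapsed = Duration::from_secs(60 * 60 * 24);
        }
        return time.elapsed() < min_elapsed;
    }
    false
}

/// Record another failure for `key`: reset the timer and bump the counter.
fn back_off(limiter: &mut BTreeMap<String, RateLimitState>, key: String) {
    limiter
        .entry(key)
        .and_modify(|e| *e = (Instant::now(), e.1 + 1))
        .or_insert((Instant::now(), 1));
}

fn main() {
    let mut limiter = BTreeMap::new();
    back_off(&mut limiter, "$badevent:example.org".to_owned());
    // One failure means a 30 second wait before the next attempt.
    assert!(should_back_off(&limiter, "$badevent:example.org"));
}
```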

View file

@ -8,11 +8,11 @@ use ruma::{
set_room_account_data,
},
},
events::{custom::CustomEventContent, AnyBasicEventContent, BasicEvent},
events::{AnyGlobalAccountDataEventContent, AnyRoomAccountDataEventContent},
serde::Raw,
};
use serde::Deserialize;
use serde_json::value::RawValue as RawJsonValue;
use serde_json::{json, value::RawValue as RawJsonValue};
#[cfg(feature = "conduit_bin")]
use rocket::{get, put};
@ -28,7 +28,7 @@ pub async fn set_global_account_data_route(
) -> ConduitResult<set_global_account_data::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let data = serde_json::from_str(body.data.get())
let data = serde_json::from_str::<serde_json::Value>(body.data.get())
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?;
let event_type = body.event_type.to_string();
@ -37,9 +37,10 @@ pub async fn set_global_account_data_route(
None,
sender_user,
event_type.clone().into(),
&BasicEvent {
content: CustomEventContent { event_type, data },
},
&json!({
"type": event_type,
"content": data,
}),
&db.globals,
)?;
@ -71,9 +72,10 @@ pub async fn set_room_account_data_route(
Some(&body.room_id),
sender_user,
event_type.clone().into(),
&BasicEvent {
content: CustomEventContent { event_type, data },
},
&json!({
"type": event_type,
"content": data,
}),
&db.globals,
)?;
@ -99,7 +101,7 @@ pub async fn get_global_account_data_route(
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?;
db.flush().await?;
let account_data = serde_json::from_str::<ExtractEventContent>(event.get())
let account_data = serde_json::from_str::<ExtractGlobalEventContent>(event.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))?
.content;
@ -130,7 +132,7 @@ pub async fn get_room_account_data_route(
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?;
db.flush().await?;
let account_data = serde_json::from_str::<ExtractEventContent>(event.get())
let account_data = serde_json::from_str::<ExtractRoomEventContent>(event.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))?
.content;
@ -138,6 +140,11 @@ pub async fn get_room_account_data_route(
}
#[derive(Deserialize)]
struct ExtractEventContent {
content: Raw<AnyBasicEventContent>,
struct ExtractRoomEventContent {
content: Raw<AnyRoomAccountDataEventContent>,
}
#[derive(Deserialize)]
struct ExtractGlobalEventContent {
content: Raw<AnyGlobalAccountDataEventContent>,
}
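With the ruma bump dropping CustomEventContent, both routes above now store the account-data event as an untyped JSON object containing only a type and a content field. A small sketch of just that shape (the db.account_data.update call it feeds into is Conduit-specific and omitted; the event type and payload here are made up):

```rust
use serde_json::json;

fn main() {
    // Stand-ins for body.event_type and the client-supplied data.
    let event_type = "org.example.setting".to_owned();
    let data = json!({ "enabled": true });

    // The same shape the routes build above: a raw event with "type" and "content".
    let event = json!({
        "type": event_type,
        "content": data,
    });

    assert_eq!(event["type"], "org.example.setting");
    println!("{}", event);
}
```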

View file

@ -1,5 +1,5 @@
use super::{State, SESSION_ID_LENGTH};
use crate::{utils, ConduitResult, Database, Error, Ruma};
use crate::{utils, ConduitResult, Database, Error, Result, Ruma};
use ruma::{
api::client::{
error::ErrorKind,
@ -12,6 +12,7 @@ use ruma::{
},
},
encryption::UnsignedDeviceInfo,
DeviceId, UserId,
};
use std::collections::{BTreeMap, HashSet};
@ -78,74 +79,14 @@ pub async fn get_keys_route(
) -> ConduitResult<get_keys::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let mut master_keys = BTreeMap::new();
let mut self_signing_keys = BTreeMap::new();
let mut user_signing_keys = BTreeMap::new();
let mut device_keys = BTreeMap::new();
let response = get_keys_helper(
Some(sender_user),
&body.device_keys,
|u| u == sender_user,
&db,
)?;
for (user_id, device_ids) in &body.device_keys {
if device_ids.is_empty() {
let mut container = BTreeMap::new();
for device_id in db.users.all_device_ids(user_id) {
let device_id = device_id?;
if let Some(mut keys) = db.users.get_device_keys(user_id, &device_id)? {
let metadata = db
.users
.get_device_metadata(user_id, &device_id)?
.ok_or_else(|| {
Error::bad_database("all_device_keys contained nonexistent device.")
})?;
keys.unsigned = UnsignedDeviceInfo {
device_display_name: metadata.display_name,
};
container.insert(device_id, keys);
}
}
device_keys.insert(user_id.clone(), container);
} else {
for device_id in device_ids {
let mut container = BTreeMap::new();
if let Some(mut keys) = db.users.get_device_keys(&user_id.clone(), &device_id)? {
let metadata = db.users.get_device_metadata(user_id, &device_id)?.ok_or(
Error::BadRequest(
ErrorKind::InvalidParam,
"Tried to get keys for nonexistent device.",
),
)?;
keys.unsigned = UnsignedDeviceInfo {
device_display_name: metadata.display_name,
};
container.insert(device_id.clone(), keys);
}
device_keys.insert(user_id.clone(), container);
}
}
if let Some(master_key) = db.users.get_master_key(user_id, sender_user)? {
master_keys.insert(user_id.clone(), master_key);
}
if let Some(self_signing_key) = db.users.get_self_signing_key(user_id, sender_user)? {
self_signing_keys.insert(user_id.clone(), self_signing_key);
}
if user_id == sender_user {
if let Some(user_signing_key) = db.users.get_user_signing_key(sender_user)? {
user_signing_keys.insert(user_id.clone(), user_signing_key);
}
}
}
Ok(get_keys::Response {
master_keys,
self_signing_keys,
user_signing_keys,
device_keys,
failures: BTreeMap::new(),
}
.into())
Ok(response.into())
}
#[cfg_attr(
@ -356,3 +297,81 @@ pub async fn get_key_changes_route(
}
.into())
}
pub fn get_keys_helper<F: Fn(&UserId) -> bool>(
sender_user: Option<&UserId>,
device_keys_input: &BTreeMap<UserId, Vec<Box<DeviceId>>>,
allowed_signatures: F,
db: &Database,
) -> Result<get_keys::Response> {
let mut master_keys = BTreeMap::new();
let mut self_signing_keys = BTreeMap::new();
let mut user_signing_keys = BTreeMap::new();
let mut device_keys = BTreeMap::new();
for (user_id, device_ids) in device_keys_input {
if device_ids.is_empty() {
let mut container = BTreeMap::new();
for device_id in db.users.all_device_ids(user_id) {
let device_id = device_id?;
if let Some(mut keys) = db.users.get_device_keys(user_id, &device_id)? {
let metadata = db
.users
.get_device_metadata(user_id, &device_id)?
.ok_or_else(|| {
Error::bad_database("all_device_keys contained nonexistent device.")
})?;
keys.unsigned = UnsignedDeviceInfo {
device_display_name: metadata.display_name,
};
container.insert(device_id, keys);
}
}
device_keys.insert(user_id.clone(), container);
} else {
for device_id in device_ids {
let mut container = BTreeMap::new();
if let Some(mut keys) = db.users.get_device_keys(&user_id.clone(), &device_id)? {
let metadata = db.users.get_device_metadata(user_id, &device_id)?.ok_or(
Error::BadRequest(
ErrorKind::InvalidParam,
"Tried to get keys for nonexistent device.",
),
)?;
keys.unsigned = UnsignedDeviceInfo {
device_display_name: metadata.display_name,
};
container.insert(device_id.clone(), keys);
}
device_keys.insert(user_id.clone(), container);
}
}
if let Some(master_key) = db.users.get_master_key(user_id, &allowed_signatures)? {
master_keys.insert(user_id.clone(), master_key);
}
if let Some(self_signing_key) = db
.users
.get_self_signing_key(user_id, &allowed_signatures)?
{
self_signing_keys.insert(user_id.clone(), self_signing_key);
}
if Some(user_id) == sender_user {
if let Some(user_signing_key) = db.users.get_user_signing_key(user_id)? {
user_signing_keys.insert(user_id.clone(), user_signing_key);
}
}
}
Ok(get_keys::Response {
master_keys,
self_signing_keys,
user_signing_keys,
device_keys,
failures: BTreeMap::new(),
})
}
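The point of extracting get_keys_helper is that the client and federation key queries only differ in which cross-signing signatures the requester may see, and that difference is passed in as the allowed_signatures closure (the client route passes |u| u == sender_user, the federation route compares against the requesting server). A simplified, self-contained sketch of that idea with plain strings in place of ruma's UserId and signature types; visible_signatures and the sample data are illustrative, not Conduit's:

```rust
/// Keep only the signatures whose signing user passes the caller's predicate,
/// mirroring the per-user filtering done by get_master_key/get_self_signing_key.
fn visible_signatures<'a, F: Fn(&str) -> bool>(
    signatures: &'a [(String, String)], // (signing user, signature)
    allowed: F,
) -> Vec<&'a (String, String)> {
    signatures
        .iter()
        .filter(|(user, _)| allowed(user.as_str()))
        .collect()
}

fn main() {
    let sigs = vec![
        ("@alice:example.org".to_owned(), "sig_a".to_owned()),
        ("@bob:other.org".to_owned(), "sig_b".to_owned()),
    ];

    // Client /keys/query: the sender may see their own signatures.
    let sender = "@alice:example.org";
    let for_client = visible_signatures(&sigs, |u| u == sender);

    // Federation /user/keys/query: only signatures from users on the asking server.
    let for_federation = visible_signatures(&sigs, |u| u.ends_with(":example.org"));

    assert_eq!(for_client.len(), 1);
    assert_eq!(for_federation.len(), 1);
}
```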

View file

@ -4,7 +4,7 @@ use crate::{
pdu::{PduBuilder, PduEvent},
server_server, utils, ConduitResult, Database, Error, Result, Ruma,
};
use log::{error, warn};
use log::{debug, error, warn};
use member::{MemberEventContent, MembershipState};
use rocket::futures;
use ruma::{
@ -29,9 +29,10 @@ use ruma::{
uint, EventId, RoomId, RoomVersionId, ServerName, UserId,
};
use std::{
collections::{BTreeMap, HashSet},
collections::{btree_map::Entry, BTreeMap, HashSet},
convert::{TryFrom, TryInto},
sync::{Arc, RwLock},
time::{Duration, Instant},
};
#[cfg(feature = "conduit_bin")]
@ -703,6 +704,38 @@ async fn validate_and_add_event_id(
error!("{:?}: {:?}", pdu, e);
Error::BadServerResponse("Invalid PDU in server response")
})?;
let event_id = EventId::try_from(&*format!(
"${}",
ruma::signatures::reference_hash(&value, &room_version)
.expect("ruma can calculate reference hashes")
))
.expect("ruma's reference hashes are valid event ids");
let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) {
Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
}
Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
};
if let Some((time, tries)) = db
.globals
.bad_event_ratelimiter
.read()
.unwrap()
.get(&event_id)
{
// Exponential backoff
let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries);
if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
}
if time.elapsed() < min_elapsed_duration {
debug!("Backing off from {}", event_id);
return Err(Error::BadServerResponse("bad event, still backing off"));
}
}
server_server::fetch_required_signing_keys(&value, pub_key_map, db).await?;
if let Err(e) = ruma::signatures::verify_event(
@ -712,17 +745,11 @@ async fn validate_and_add_event_id(
&value,
room_version,
) {
warn!("Event failed verification: {}", e);
warn!("Event {} failed verification: {}", event_id, e);
back_off(event_id);
return Err(Error::BadServerResponse("Event failed verification."));
}
let event_id = EventId::try_from(&*format!(
"${}",
ruma::signatures::reference_hash(&value, &room_version)
.expect("ruma can calculate reference hashes")
))
.expect("ruma's reference hashes are valid event ids");
value.insert(
"event_id".to_owned(),
CanonicalJsonValue::String(event_id.as_str().to_owned()),

View file

@ -5,12 +5,14 @@ use ruma::{
error::ErrorKind,
r0::{read_marker::set_read_marker, receipt::create_receipt},
},
events::{AnyEphemeralRoomEvent, AnyEvent, EventType},
events::{AnyEphemeralRoomEvent, EventType},
receipt::ReceiptType,
MilliSecondsSinceUnixEpoch,
};
#[cfg(feature = "conduit_bin")]
use rocket::post;
use std::{collections::BTreeMap, time::SystemTime};
use std::collections::BTreeMap;
#[cfg_attr(
feature = "conduit_bin",
@ -27,7 +29,6 @@ pub async fn set_read_marker_route(
content: ruma::events::fully_read::FullyReadEventContent {
event_id: body.fully_read.clone(),
},
room_id: body.room_id.clone(),
};
db.account_data.update(
Some(&body.room_id),
@ -54,26 +55,23 @@ pub async fn set_read_marker_route(
user_receipts.insert(
sender_user.clone(),
ruma::events::receipt::Receipt {
ts: Some(SystemTime::now()),
ts: Some(MilliSecondsSinceUnixEpoch::now()),
},
);
let mut receipts = BTreeMap::new();
receipts.insert(ReceiptType::Read, user_receipts);
let mut receipt_content = BTreeMap::new();
receipt_content.insert(
event.to_owned(),
ruma::events::receipt::Receipts {
read: Some(user_receipts),
},
);
receipt_content.insert(event.to_owned(), receipts);
db.rooms.edus.readreceipt_update(
&sender_user,
&body.room_id,
AnyEvent::Ephemeral(AnyEphemeralRoomEvent::Receipt(
ruma::events::receipt::ReceiptEvent {
content: ruma::events::receipt::ReceiptEventContent(receipt_content),
room_id: body.room_id.clone(),
},
)),
AnyEphemeralRoomEvent::Receipt(ruma::events::receipt::ReceiptEvent {
content: ruma::events::receipt::ReceiptEventContent(receipt_content),
room_id: body.room_id.clone(),
}),
&db.globals,
)?;
}
@ -112,26 +110,22 @@ pub async fn create_receipt_route(
user_receipts.insert(
sender_user.clone(),
ruma::events::receipt::Receipt {
ts: Some(SystemTime::now()),
ts: Some(MilliSecondsSinceUnixEpoch::now()),
},
);
let mut receipts = BTreeMap::new();
receipts.insert(ReceiptType::Read, user_receipts);
let mut receipt_content = BTreeMap::new();
receipt_content.insert(
body.event_id.to_owned(),
ruma::events::receipt::Receipts {
read: Some(user_receipts),
},
);
receipt_content.insert(body.event_id.to_owned(), receipts);
db.rooms.edus.readreceipt_update(
&sender_user,
&body.room_id,
AnyEvent::Ephemeral(AnyEphemeralRoomEvent::Receipt(
ruma::events::receipt::ReceiptEvent {
content: ruma::events::receipt::ReceiptEventContent(receipt_content),
room_id: body.room_id.clone(),
},
)),
AnyEphemeralRoomEvent::Receipt(ruma::events::receipt::ReceiptEvent {
content: ruma::events::receipt::ReceiptEventContent(receipt_content),
room_id: body.room_id.clone(),
}),
&db.globals,
)?;
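In the bumped ruma, a read receipt's content is a nested map keyed by ReceiptType::Read rather than the old Receipts { read: … } struct, which is why both routes above now build two BTreeMaps before calling readreceipt_update. A sketch of the resulting nesting using simplified stand-in types (the real code uses ruma's EventId, UserId, ReceiptType and MilliSecondsSinceUnixEpoch):

```rust
use std::collections::BTreeMap;

// Simplified stand-ins, just to show the nesting.
type UserId = String;
type EventId = String;

#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
enum ReceiptType {
    Read,
}

#[derive(Debug)]
struct Receipt {
    ts: Option<u64>, // ruma stores a MilliSecondsSinceUnixEpoch here
}

fn main() {
    // event id -> receipt type -> user -> receipt, matching the maps built above.
    let mut user_receipts: BTreeMap<UserId, Receipt> = BTreeMap::new();
    user_receipts.insert(
        "@alice:example.org".into(),
        Receipt { ts: Some(1_621_540_000_000) },
    );

    let mut receipts: BTreeMap<ReceiptType, BTreeMap<UserId, Receipt>> = BTreeMap::new();
    receipts.insert(ReceiptType::Read, user_receipts);

    let mut receipt_content: BTreeMap<EventId, _> = BTreeMap::new();
    receipt_content.insert("$event:example.org".into(), receipts);

    println!("{:?}", receipt_content);
}
```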

View file

@ -422,7 +422,7 @@ pub async fn sync_events_route(
}
let joined_room = sync_events::JoinedRoom {
account_data: sync_events::AccountData {
account_data: sync_events::RoomAccountData {
events: db
.account_data
.changes_since(Some(&room_id), &sender_user, since)?
@ -506,7 +506,7 @@ pub async fn sync_events_route(
left_rooms.insert(
room_id.clone(),
sync_events::LeftRoom {
account_data: sync_events::AccountData { events: Vec::new() },
account_data: sync_events::RoomAccountData { events: Vec::new() },
timeline: sync_events::Timeline {
limited: false,
prev_batch: Some(next_batch.clone()),
@ -577,7 +577,7 @@ pub async fn sync_events_route(
.map(|(_, v)| Raw::from(v))
.collect(),
},
account_data: sync_events::AccountData {
account_data: sync_events::GlobalAccountData {
events: db
.account_data
.changes_since(None, &sender_user, since)?

View file

@ -213,7 +213,7 @@ impl Database {
pusher: pusher::PushData::new(&db)?,
globals: globals::Globals::load(
db.open_tree("global")?,
db.open_tree("servertimeout_signingkey")?,
db.open_tree("server_signingkeys")?,
config,
)?,
_db: db,

View file

@ -1,7 +1,7 @@
use crate::{utils, Error, Result};
use ruma::{
api::client::error::ErrorKind,
events::{AnyEvent as EduEvent, EventType},
events::{AnyEphemeralRoomEvent, EventType},
serde::Raw,
RoomId, UserId,
};
@ -80,7 +80,7 @@ impl AccountData {
room_id: Option<&RoomId>,
user_id: &UserId,
since: u64,
) -> Result<HashMap<EventType, Raw<EduEvent>>> {
) -> Result<HashMap<EventType, Raw<AnyEphemeralRoomEvent>>> {
let mut userdata = HashMap::new();
let mut prefix = room_id
@ -110,7 +110,7 @@ impl AccountData {
.map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?,
)
.map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?,
serde_json::from_slice::<Raw<EduEvent>>(&v).map_err(|_| {
serde_json::from_slice::<Raw<AnyEphemeralRoomEvent>>(&v).map_err(|_| {
Error::bad_database("Database contains invalid account data.")
})?,
))

View file

@ -2,20 +2,22 @@ use crate::{database::Config, utils, Error, Result};
use log::{error, info};
use ruma::{
api::federation::discovery::{ServerSigningKeys, VerifyKey},
ServerName, ServerSigningKeyId,
EventId, MilliSecondsSinceUnixEpoch, ServerName, ServerSigningKeyId,
};
use rustls::{ServerCertVerifier, WebPKIVerifier};
use std::{
collections::{BTreeMap, HashMap},
sync::{Arc, RwLock},
time::Duration,
time::{Duration, Instant},
};
use tokio::sync::Semaphore;
use trust_dns_resolver::TokioAsyncResolver;
pub const COUNTER: &str = "c";
type WellKnownMap = HashMap<Box<ServerName>, (String, String)>;
type TlsNameMap = HashMap<String, webpki::DNSName>;
type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries
#[derive(Clone)]
pub struct Globals {
pub actual_destination_cache: Arc<RwLock<WellKnownMap>>, // actual_destination, host
@ -26,7 +28,10 @@ pub struct Globals {
reqwest_client: reqwest::Client,
dns_resolver: TokioAsyncResolver,
jwt_decoding_key: Option<jsonwebtoken::DecodingKey<'static>>,
pub(super) servertimeout_signingkey: sled::Tree, // ServerName + Timeout Timestamp -> algorithm:key + pubkey
pub(super) server_signingkeys: sled::Tree,
pub bad_event_ratelimiter: Arc<RwLock<BTreeMap<EventId, RateLimitState>>>,
pub bad_signature_ratelimiter: Arc<RwLock<BTreeMap<Vec<String>, RateLimitState>>>,
pub servername_ratelimiter: Arc<RwLock<BTreeMap<Box<ServerName>, Arc<Semaphore>>>>,
}
struct MatrixServerVerifier {
@ -65,7 +70,7 @@ impl ServerCertVerifier for MatrixServerVerifier {
impl Globals {
pub fn load(
globals: sled::Tree,
servertimeout_signingkey: sled::Tree,
server_signingkeys: sled::Tree,
config: Config,
) -> Result<Self> {
let bytes = &*globals
@ -135,8 +140,11 @@ impl Globals {
})?,
actual_destination_cache: Arc::new(RwLock::new(WellKnownMap::new())),
tls_name_override,
servertimeout_signingkey,
server_signingkeys,
jwt_decoding_key,
bad_event_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())),
bad_signature_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())),
servername_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())),
})
}
@ -203,31 +211,21 @@ impl Globals {
/// Remove the outdated keys and insert the new ones.
///
/// This doesn't actually check that the keys provided are newer than the old set.
pub fn add_signing_key(&self, origin: &ServerName, keys: &ServerSigningKeys) -> Result<()> {
let mut key1 = origin.as_bytes().to_vec();
key1.push(0xff);
let mut key2 = key1.clone();
let ts = keys
.valid_until_ts
.duration_since(std::time::UNIX_EPOCH)
.expect("time is valid")
.as_millis() as u64;
key1.extend_from_slice(&ts.to_be_bytes());
key2.extend_from_slice(&(ts + 1).to_be_bytes());
self.servertimeout_signingkey.insert(
key1,
serde_json::to_vec(&keys.verify_keys).expect("ServerSigningKeys are a valid string"),
)?;
self.servertimeout_signingkey.insert(
key2,
serde_json::to_vec(&keys.old_verify_keys)
.expect("ServerSigningKeys are a valid string"),
)?;
pub fn add_signing_key(&self, origin: &ServerName, new_keys: &ServerSigningKeys) -> Result<()> {
self.server_signingkeys
.update_and_fetch(origin.as_bytes(), |signingkeys| {
let mut keys = signingkeys
.and_then(|keys| serde_json::from_slice(keys).ok())
.unwrap_or_else(|| {
// Just insert "now", it doesn't matter
ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now())
});
keys.verify_keys
.extend(new_keys.verify_keys.clone().into_iter());
keys.old_verify_keys
.extend(new_keys.old_verify_keys.clone().into_iter());
Some(serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"))
})?;
Ok(())
}
@ -237,26 +235,22 @@ impl Globals {
&self,
origin: &ServerName,
) -> Result<BTreeMap<ServerSigningKeyId, VerifyKey>> {
let mut response = BTreeMap::new();
let signingkeys = self
.server_signingkeys
.get(origin.as_bytes())?
.and_then(|bytes| serde_json::from_slice::<ServerSigningKeys>(&bytes).ok())
.map(|keys| {
let mut tree = keys.verify_keys;
tree.extend(
keys.old_verify_keys
.into_iter()
.map(|old| (old.0, VerifyKey::new(old.1.key))),
);
tree
})
.unwrap_or_else(BTreeMap::new);
let now = crate::utils::millis_since_unix_epoch();
for item in self.servertimeout_signingkey.scan_prefix(origin.as_bytes()) {
let (k, bytes) = item?;
let valid_until = k
.splitn(2, |&b| b == 0xff)
.nth(1)
.map(crate::utils::u64_from_bytes)
.ok_or_else(|| Error::bad_database("Invalid signing keys."))?
.map_err(|_| Error::bad_database("Invalid signing key valid until bytes"))?;
// If these keys are still valid use em!
if valid_until > now {
let btree: BTreeMap<_, _> = serde_json::from_slice(&bytes)
.map_err(|_| Error::bad_database("Invalid BTreeMap<> of signing keys"))?;
response.extend(btree);
}
}
Ok(response)
Ok(signingkeys)
}
pub fn database_version(&self) -> Result<u64> {
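This is the "optimize signing key storage" part: instead of two timestamp-suffixed entries per server (origin + 0xff + valid_until_ts), the new server_signingkeys tree keeps a single ServerSigningKeys value per origin, merges newly fetched keys into it on write (sled's update_and_fetch above), and reads current and old verify keys back as one map. A sketch of that merge with plain maps standing in for the ruma types; StoredKeys and the function bodies here are illustrative, and serialization plus the sled tree are omitted:

```rust
use std::collections::BTreeMap;

// Simplified stand-in for ruma's ServerSigningKeys.
#[derive(Default)]
struct StoredKeys {
    verify_keys: BTreeMap<String, String>,     // key id -> public key
    old_verify_keys: BTreeMap<String, String>, // expired key id -> public key
}

/// One value per server: merge newly fetched keys into whatever is stored,
/// mirroring the update_and_fetch closure above.
fn add_signing_key(store: &mut BTreeMap<String, StoredKeys>, origin: &str, new: StoredKeys) {
    let entry = store.entry(origin.to_owned()).or_default();
    entry.verify_keys.extend(new.verify_keys);
    entry.old_verify_keys.extend(new.old_verify_keys);
}

/// Read side: hand back current and old keys as one map, as signing_keys_for does.
fn signing_keys_for(store: &BTreeMap<String, StoredKeys>, origin: &str) -> BTreeMap<String, String> {
    store
        .get(origin)
        .map(|keys| {
            let mut all = keys.verify_keys.clone();
            all.extend(keys.old_verify_keys.clone());
            all
        })
        .unwrap_or_default()
}

fn main() {
    let mut store = BTreeMap::new();
    add_signing_key(
        &mut store,
        "example.org",
        StoredKeys {
            verify_keys: BTreeMap::from([("ed25519:abc".to_owned(), "base64key".to_owned())]),
            old_verify_keys: BTreeMap::new(),
        },
    );
    assert!(signing_keys_for(&store, "example.org").contains_key("ed25519:abc"));
}
```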

View file

@ -294,7 +294,8 @@ async fn send_notice(
} else {
notifi.sender = Some(&event.sender);
notifi.event_type = Some(&event.kind);
notifi.content = serde_json::value::to_raw_value(&event.content).ok();
let content = serde_json::value::to_raw_value(&event.content).ok();
notifi.content = content.as_deref();
if event.kind == EventType::RoomMember {
notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str());
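The one-line pusher change is a borrow fix: to_raw_value returns an owned Box<RawValue>, while the notification's content field in the bumped ruma borrows a &RawValue, so the owned value has to be kept alive in a local and lent out with as_deref(). A minimal sketch of that pattern, assuming serde_json with the raw_value feature and using an illustrative borrowing struct in place of ruma's notification type:

```rust
use serde_json::value::{to_raw_value, RawValue};

// Borrowing struct standing in for the ruma push notification type.
struct Notification<'a> {
    content: Option<&'a RawValue>,
}

fn main() {
    let event_content = serde_json::json!({ "body": "hello" });

    // Keep the owned Box<RawValue> alive in a binding...
    let content = to_raw_value(&event_content).ok();

    // ...and lend it to the borrowing field via as_deref().
    let notifi = Notification {
        content: content.as_deref(),
    };

    assert!(notifi.content.is_some());
}
```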

View file

@ -2,7 +2,7 @@ use crate::{utils, Error, Result};
use ruma::{
events::{
presence::{PresenceEvent, PresenceEventContent},
AnyEvent as EduEvent, SyncEphemeralRoomEvent,
AnyEphemeralRoomEvent, SyncEphemeralRoomEvent,
},
presence::PresenceState,
serde::Raw,
@ -32,7 +32,7 @@ impl RoomEdus {
&self,
user_id: &UserId,
room_id: &RoomId,
event: EduEvent,
event: AnyEphemeralRoomEvent,
globals: &super::super::globals::Globals,
) -> Result<()> {
let mut prefix = room_id.as_bytes().to_vec();

View file

@ -3,7 +3,7 @@ use std::{
convert::{TryFrom, TryInto},
fmt::Debug,
sync::Arc,
time::{Duration, Instant, SystemTime},
time::{Duration, Instant},
};
use crate::{
@ -23,7 +23,9 @@ use ruma::{
OutgoingRequest,
},
events::{push_rules, AnySyncEphemeralRoomEvent, EventType},
push, ServerName, UInt, UserId,
push,
receipt::ReceiptType,
MilliSecondsSinceUnixEpoch, ServerName, UInt, UserId,
};
use sled::IVec;
use tokio::{select, sync::Semaphore};
@ -277,17 +279,14 @@ impl Sending {
events.push(e);
}
match outgoing_kind {
OutgoingKind::Normal(server_name) => {
if let Ok((select_edus, last_count)) = Self::select_edus(db, server_name) {
events.extend_from_slice(&select_edus);
db.sending
.servername_educount
.insert(server_name.as_bytes(), &last_count.to_be_bytes())
.unwrap();
}
if let OutgoingKind::Normal(server_name) = outgoing_kind {
if let Ok((select_edus, last_count)) = Self::select_edus(db, server_name) {
events.extend_from_slice(&select_edus);
db.sending
.servername_educount
.insert(server_name.as_bytes(), &last_count.to_be_bytes())
.unwrap();
}
_ => {}
}
}
@ -326,14 +325,14 @@ impl Sending {
AnySyncEphemeralRoomEvent::Receipt(r) => {
let mut read = BTreeMap::new();
let (event_id, receipt) = r
let (event_id, mut receipt) = r
.content
.0
.into_iter()
.next()
.expect("we only use one event per read receipt");
let receipt = receipt
.read
.remove(&ReceiptType::Read)
.expect("our read receipts always set this")
.remove(&user_id)
.expect("our read receipts always have the user here");
@ -436,7 +435,7 @@ impl Sending {
),
)
})?
.to_any_event())
.to_room_event())
}
SendingEventType::Edu(_) => {
// Appservices don't need EDUs (?)
@ -610,7 +609,7 @@ impl Sending {
origin: db.globals.server_name(),
pdus: &pdu_jsons,
edus: &edu_jsons,
origin_server_ts: SystemTime::now(),
origin_server_ts: MilliSecondsSinceUnixEpoch::now(),
transaction_id: &base64::encode_config(
Self::calculate_hash(
&events

View file

@ -1,19 +1,13 @@
use crate::{utils, Error, Result};
use ruma::{
api::client::{
error::ErrorKind,
r0::{
device::Device,
keys::{CrossSigningKey, OneTimeKey},
},
},
encryption::DeviceKeys,
api::client::{error::ErrorKind, r0::device::Device},
encryption::{CrossSigningKey, DeviceKeys, OneTimeKey},
events::{AnyToDeviceEvent, EventType},
identifiers::MxcUri,
serde::Raw,
DeviceId, DeviceKeyAlgorithm, DeviceKeyId, UInt, UserId,
DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, UInt, UserId,
};
use std::{collections::BTreeMap, convert::TryFrom, mem, time::SystemTime};
use std::{collections::BTreeMap, convert::TryFrom, mem};
#[derive(Clone)]
pub struct Users {
@ -200,7 +194,7 @@ impl Users {
device_id: device_id.into(),
display_name: initial_device_display_name,
last_seen_ip: None, // TODO
last_seen_ts: Some(SystemTime::now()),
last_seen_ts: Some(MilliSecondsSinceUnixEpoch::now()),
})
.expect("Device::to_string never fails.")
.as_bytes(),
@ -653,12 +647,11 @@ impl Users {
})
}
pub fn get_master_key(
pub fn get_master_key<F: Fn(&UserId) -> bool>(
&self,
user_id: &UserId,
sender_id: &UserId,
allowed_signatures: F,
) -> Result<Option<CrossSigningKey>> {
// TODO: hide some signatures
self.userid_masterkeyid
.get(user_id.to_string())?
.map_or(Ok(None), |key| {
@ -673,7 +666,7 @@ impl Users {
cross_signing_key.signatures = cross_signing_key
.signatures
.into_iter()
.filter(|(user, _)| user == user_id || user == sender_id)
.filter(|(user, _)| allowed_signatures(user))
.collect();
Ok(Some(cross_signing_key))
@ -681,10 +674,10 @@ impl Users {
})
}
pub fn get_self_signing_key(
pub fn get_self_signing_key<F: Fn(&UserId) -> bool>(
&self,
user_id: &UserId,
sender_id: &UserId,
allowed_signatures: F,
) -> Result<Option<CrossSigningKey>> {
self.userid_selfsigningkeyid
.get(user_id.to_string())?
@ -700,7 +693,7 @@ impl Users {
cross_signing_key.signatures = cross_signing_key
.signatures
.into_iter()
.filter(|(user, _)| user == user_id || user == sender_id)
.filter(|(user, _)| user == user_id || allowed_signatures(user))
.collect();
Ok(Some(cross_signing_key))

View file

@ -158,6 +158,7 @@ fn setup_rocket(config: Figment, data: Database) -> rocket::Rocket<rocket::Build
server_server::get_devices_route,
server_server::get_room_information_route,
server_server::get_profile_information_route,
server_server::get_keys_route,
],
)
.register(

View file

@ -2,15 +2,17 @@ use crate::Error;
use log::error;
use ruma::{
events::{
pdu::EventHash, room::member::MemberEventContent, AnyEvent, AnyRoomEvent, AnyStateEvent,
AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent,
pdu::EventHash, room::member::MemberEventContent, AnyEphemeralRoomEvent, AnyRoomEvent,
AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType,
StateEvent,
},
serde::{CanonicalJsonObject, CanonicalJsonValue, Raw},
state_res, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UInt, UserId,
state_res, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName,
ServerSigningKeyId, UInt, UserId,
};
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::{cmp::Ordering, collections::BTreeMap, convert::TryFrom, time::UNIX_EPOCH};
use std::{cmp::Ordering, collections::BTreeMap, convert::TryFrom};
#[derive(Clone, Deserialize, Serialize, Debug)]
pub struct PduEvent {
@ -105,7 +107,7 @@ impl PduEvent {
/// This only works for events that are also AnyRoomEvents.
#[tracing::instrument(skip(self))]
pub fn to_any_event(&self) -> Raw<AnyEvent> {
pub fn to_any_event(&self) -> Raw<AnyEphemeralRoomEvent> {
let mut json = json!({
"content": self.content,
"type": self.kind,
@ -267,10 +269,9 @@ impl state_res::Event for PduEvent {
fn content(&self) -> serde_json::Value {
self.content.clone()
}
fn origin_server_ts(&self) -> std::time::SystemTime {
UNIX_EPOCH + std::time::Duration::from_millis(self.origin_server_ts.into())
fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch {
MilliSecondsSinceUnixEpoch(self.origin_server_ts)
}
fn state_key(&self) -> Option<String> {
self.state_key.clone()
}
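Several hunks in this commit swap std::time::SystemTime for ruma's MilliSecondsSinceUnixEpoch; PduEvent can return it directly because the type is a thin wrapper around the millisecond UInt it already stores. A small sketch of the two constructions the commit relies on, assuming the ruma version being bumped to here (now() and from_system_time() are the calls used in the diff itself):

```rust
use ruma::MilliSecondsSinceUnixEpoch;
use std::time::{Duration, SystemTime};

fn main() {
    // Current time, as used for read receipts, device metadata and transactions.
    let now = MilliSecondsSinceUnixEpoch::now();

    // A future expiry, as used for valid_until_ts on the served signing keys.
    let in_two_minutes = MilliSecondsSinceUnixEpoch::from_system_time(
        SystemTime::now() + Duration::from_secs(60 * 2),
    )
    .expect("time is valid");

    println!("{:?} {:?}", now, in_two_minutes);
}
```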

View file

@ -34,6 +34,7 @@ pub struct Ruma<T: Outgoing> {
pub body: T::Incoming,
pub sender_user: Option<UserId>,
pub sender_device: Option<Box<DeviceId>>,
pub sender_servername: Option<Box<ServerName>>,
// This is None when body is not a valid string
pub json_body: Option<CanonicalJsonValue>,
pub from_appservice: bool,
@ -68,7 +69,10 @@ where
let mut json_body = serde_json::from_slice::<CanonicalJsonValue>(&body).ok();
let (sender_user, sender_device, from_appservice) = if let Some((_id, registration)) = db
let (sender_user, sender_device, sender_servername, from_appservice) = if let Some((
_id,
registration,
)) = db
.appservice
.iter_all()
.filter_map(|r| r.ok())
@ -104,10 +108,10 @@ where
}
// TODO: Check if appservice is allowed to be that user
(Some(user_id), None, true)
(Some(user_id), None, None, true)
}
AuthScheme::ServerSignatures => (None, None, true),
AuthScheme::None => (None, None, true),
AuthScheme::ServerSignatures => (None, None, None, true),
AuthScheme::None => (None, None, None, true),
}
} else {
match metadata.authentication {
@ -116,9 +120,12 @@ where
match db.users.find_from_token(&token).unwrap() {
// Unknown Token
None => return Failure((Status::raw(581), ())),
Some((user_id, device_id)) => {
(Some(user_id), Some(Box::<DeviceId>::from(device_id)), false)
}
Some((user_id, device_id)) => (
Some(user_id),
Some(Box::<DeviceId>::from(device_id)),
None,
false,
),
}
} else {
// Missing Token
@ -227,27 +234,24 @@ where
CanonicalJsonValue::Object(signatures),
);
let keys = match server_server::fetch_signing_keys(
&db,
&origin,
vec![&key.to_owned()],
)
.await
{
Ok(b) => b,
Err(e) => {
warn!("Failed to fetch signing keys: {}", e);
let keys =
match server_server::fetch_signing_keys(&db, &origin, vec![key.to_owned()])
.await
{
Ok(b) => b,
Err(e) => {
warn!("Failed to fetch signing keys: {}", e);
// Forbidden
return Failure((Status::raw(580), ()));
}
};
// Forbidden
return Failure((Status::raw(580), ()));
}
};
let mut pub_key_map = BTreeMap::new();
pub_key_map.insert(origin.as_str().to_owned(), keys);
match ruma::signatures::verify_json(&pub_key_map, &request_map) {
Ok(()) => (None, None, false),
Ok(()) => (None, None, Some(origin), false),
Err(e) => {
warn!("Failed to verify json request from {}: {}", origin, e);
@ -260,7 +264,7 @@ where
}
}
}
AuthScheme::None => (None, None, false),
AuthScheme::None => (None, None, None, false),
}
};
@ -307,6 +311,7 @@ where
body: t,
sender_user,
sender_device,
sender_servername,
from_appservice,
json_body,
}),

View file

@ -1,7 +1,10 @@
use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma};
use crate::{
client_server::{self, get_keys_helper},
utils, ConduitResult, Database, Error, PduEvent, Result, Ruma,
};
use get_profile_information::v1::ProfileField;
use http::header::{HeaderValue, AUTHORIZATION, HOST};
use log::{debug, error, info, warn};
use log::{debug, error, info, trace, warn};
use regex::Regex;
use rocket::{response::content::Json, State};
use ruma::{
@ -15,6 +18,7 @@ use ruma::{
VerifyKey,
},
event::{get_event, get_missing_events, get_room_state_ids},
keys::get_keys,
membership::{
create_invite,
create_join_event::{self, RoomState},
@ -32,12 +36,14 @@ use ruma::{
create::CreateEventContent,
member::{MemberEventContent, MembershipState},
},
AnyEphemeralRoomEvent, AnyEvent as EduEvent, EventType,
AnyEphemeralRoomEvent, EventType,
},
receipt::ReceiptType,
serde::Raw,
signatures::{CanonicalJsonObject, CanonicalJsonValue},
state_res::{self, Event, EventMap, RoomVersion, StateMap},
uint, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId,
uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName,
ServerSigningKeyId, UserId,
};
use std::{
collections::{btree_map::Entry, BTreeMap, BTreeSet, HashSet},
@ -49,8 +55,9 @@ use std::{
pin::Pin,
result::Result as StdResult,
sync::{Arc, RwLock},
time::{Duration, SystemTime},
time::{Duration, Instant, SystemTime},
};
use tokio::sync::Semaphore;
#[cfg(feature = "conduit_bin")]
use rocket::{get, post, put};
@ -452,7 +459,10 @@ pub fn get_server_keys_route(db: State<'_, Database>) -> Json<String> {
verify_keys,
old_verify_keys: BTreeMap::new(),
signatures: BTreeMap::new(),
valid_until_ts: SystemTime::now() + Duration::from_secs(60 * 2),
valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(60 * 2),
)
.expect("time is valid"),
},
}
.try_into_http_response::<Vec<u8>>()
@ -608,6 +618,7 @@ pub async fn send_transaction_message_route<'a>(
}
};
let start_time = Instant::now();
if let Err(e) = handle_incoming_pdu(
&body.origin,
&event_id,
@ -619,7 +630,17 @@ pub async fn send_transaction_message_route<'a>(
)
.await
{
resolved_map.insert(event_id, Err(e));
resolved_map.insert(event_id.clone(), Err(e));
}
let elapsed = start_time.elapsed();
if elapsed > Duration::from_secs(1) {
warn!(
"Handling event {} took {}m{}s",
event_id,
elapsed.as_secs() / 60,
elapsed.as_secs() % 60
);
}
}
@ -653,19 +674,16 @@ pub async fn send_transaction_message_route<'a>(
let mut user_receipts = BTreeMap::new();
user_receipts.insert(user_id.clone(), user_updates.data);
let mut receipt_content = BTreeMap::new();
receipt_content.insert(
event_id.to_owned(),
ruma::events::receipt::Receipts {
read: Some(user_receipts),
},
);
let mut receipts = BTreeMap::new();
receipts.insert(ReceiptType::Read, user_receipts);
let event =
EduEvent::Ephemeral(AnyEphemeralRoomEvent::Receipt(ReceiptEvent {
content: ReceiptEventContent(receipt_content),
room_id: room_id.clone(),
}));
let mut receipt_content = BTreeMap::new();
receipt_content.insert(event_id.to_owned(), receipts);
let event = AnyEphemeralRoomEvent::Receipt(ReceiptEvent {
content: ReceiptEventContent(receipt_content),
room_id: room_id.clone(),
});
db.rooms.edus.readreceipt_update(
&user_id,
&room_id,
@ -698,6 +716,8 @@ pub async fn send_transaction_message_route<'a>(
}
}
info!("/send/{} done", body.transaction_id);
Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into())
}
@ -794,7 +814,7 @@ pub fn handle_incoming_pdu<'a>(
) {
Err(e) => {
// Drop
warn!("{:?}: {}", value, e);
warn!("Dropping bad event {}: {}", event_id, e);
return Err("Signature verification failed".to_string());
}
Ok(ruma::signatures::Verified::Signatures) => {
@ -821,6 +841,7 @@ pub fn handle_incoming_pdu<'a>(
// 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events
// 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events"
// EDIT: Step 5 is not applied anymore because it failed too often
debug!("Fetching auth events for {}", incoming_pdu.event_id);
fetch_and_handle_events(
db,
@ -1292,12 +1313,30 @@ pub(crate) fn fetch_and_handle_events<'a>(
auth_cache: &'a mut EventMap<Arc<PduEvent>>,
) -> AsyncRecursiveResult<'a, Vec<Arc<PduEvent>>, Error> {
Box::pin(async move {
let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) {
Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
}
Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
};
let mut pdus = vec![];
for id in events {
if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(&id) {
// Exponential backoff
let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries);
if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
}
if time.elapsed() < min_elapsed_duration {
debug!("Backing off from {}", id);
continue;
}
}
// a. Look at auth cache
let pdu = match auth_cache.get(id) {
Some(pdu) => {
debug!("Found {} in cache", id);
// We already have the auth chain for events in cache
pdu.clone()
}
@ -1306,7 +1345,7 @@ pub(crate) fn fetch_and_handle_events<'a>(
// (get_pdu checks both)
None => match db.rooms.get_pdu(&id)? {
Some(pdu) => {
debug!("Found {} in db", id);
trace!("Found {} in db", id);
// We need to fetch the auth chain
let _ = fetch_and_handle_events(
db,
@ -1331,7 +1370,7 @@ pub(crate) fn fetch_and_handle_events<'a>(
.await
{
Ok(res) => {
debug!("Got {} over federation: {:?}", id, res);
debug!("Got {} over federation", id);
let (event_id, mut value) =
crate::pdu::gen_event_id_canonical_json(&res.pdu)?;
// This will also fetch the auth chain
@ -1358,12 +1397,14 @@ pub(crate) fn fetch_and_handle_events<'a>(
}
Err(e) => {
warn!("Authentication of event {} failed: {:?}", id, e);
back_off(id.clone());
continue;
}
}
}
Err(_) => {
warn!("Failed to fetch event: {}", id);
back_off(id.clone());
continue;
}
}
@ -1383,10 +1424,67 @@ pub(crate) fn fetch_and_handle_events<'a>(
pub(crate) async fn fetch_signing_keys(
db: &Database,
origin: &ServerName,
signature_ids: Vec<&String>,
signature_ids: Vec<String>,
) -> Result<BTreeMap<String, String>> {
let contains_all_ids =
|keys: &BTreeMap<String, String>| signature_ids.iter().all(|&id| keys.contains_key(id));
|keys: &BTreeMap<String, String>| signature_ids.iter().all(|id| keys.contains_key(id));
let permit = db
.globals
.servername_ratelimiter
.read()
.unwrap()
.get(origin)
.map(|s| Arc::clone(s).acquire_owned());
let permit = match permit {
Some(p) => p,
None => {
let mut write = db.globals.servername_ratelimiter.write().unwrap();
let s = Arc::clone(
write
.entry(origin.to_owned())
.or_insert_with(|| Arc::new(Semaphore::new(1))),
);
s.acquire_owned()
}
}
.await;
let back_off = |id| match db
.globals
.bad_signature_ratelimiter
.write()
.unwrap()
.entry(id)
{
Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
}
Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
};
if let Some((time, tries)) = db
.globals
.bad_signature_ratelimiter
.read()
.unwrap()
.get(&signature_ids)
{
// Exponential backoff
let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries);
if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
}
if time.elapsed() < min_elapsed_duration {
debug!("Backing off from {:?}", signature_ids);
return Err(Error::BadServerResponse("bad signature, still backing off"));
}
}
debug!("Loading signing keys for {}", origin);
let mut result = db
.globals
@ -1399,6 +1497,8 @@ pub(crate) async fn fetch_signing_keys(
return Ok(result);
}
debug!("Fetching signing keys for {} over federation", origin);
if let Ok(get_keys_response) = db
.sending
.send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new())
@ -1436,14 +1536,17 @@ pub(crate) async fn fetch_signing_keys(
&server,
get_remote_server_keys::v2::Request::new(
origin,
SystemTime::now()
.checked_add(Duration::from_secs(3600))
.expect("SystemTime to large"),
MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now()
.checked_add(Duration::from_secs(3600))
.expect("SystemTime to large"),
)
.expect("time is valid"),
),
)
.await
{
debug!("Got signing keys: {:?}", keys);
trace!("Got signing keys: {:?}", keys);
for k in keys.server_keys {
db.globals.add_signing_key(origin, &k)?;
result.extend(
@ -1464,6 +1567,10 @@ pub(crate) async fn fetch_signing_keys(
}
}
drop(permit);
back_off(signature_ids);
warn!("Failed to find public key for server: {}", origin);
Err(Error::BadServerResponse(
"Failed to find public key for server",
@ -1581,7 +1688,7 @@ pub fn get_event_route<'a>(
Ok(get_event::v1::Response {
origin: db.globals.server_name().to_owned(),
origin_server_ts: SystemTime::now(),
origin_server_ts: MilliSecondsSinceUnixEpoch::now(),
pdu: PduEvent::convert_to_outgoing_federation_event(
db.rooms
.get_pdu_json(&body.event_id)?
@ -2186,6 +2293,34 @@ pub fn get_profile_information_route<'a>(
.into())
}
#[cfg_attr(
feature = "conduit_bin",
post("/_matrix/federation/v1/user/keys/query", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub fn get_keys_route<'a>(
db: State<'a, Database>,
body: Ruma<get_keys::v1::Request>,
) -> ConduitResult<get_keys::v1::Response> {
if !db.globals.allow_federation() {
return Err(Error::bad_config("Federation is disabled."));
}
let result = get_keys_helper(
None,
&body.device_keys,
|u| Some(u.server_name()) == body.sender_servername.as_deref(),
&db,
)?;
Ok(get_keys::v1::Response {
device_keys: result.device_keys,
master_keys: result.master_keys,
self_signing_keys: result.self_signing_keys,
}
.into())
}
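Signature fetching is also serialized per destination: the new servername_ratelimiter in Globals holds one tokio Semaphore per server, and fetch_signing_keys takes a permit before going to the network, so at most one outbound key fetch per server is in flight at a time. A self-contained sketch of that permit dance, simplified to String keys and with acquire_for as an illustrative name (assumes tokio with the macros and rt-multi-thread features):

```rust
use std::collections::BTreeMap;
use std::sync::{Arc, RwLock};
use tokio::sync::{OwnedSemaphorePermit, Semaphore};

// One permit per destination server; only one fetch per server runs at a time.
type ServerLimiter = Arc<RwLock<BTreeMap<String, Arc<Semaphore>>>>;

async fn acquire_for(limiter: &ServerLimiter, origin: &str) -> OwnedSemaphorePermit {
    // Fast path: the semaphore already exists, grab a clone under the read lock.
    let existing = limiter
        .read()
        .unwrap()
        .get(origin)
        .map(|s| Arc::clone(s).acquire_owned());

    match existing {
        Some(fut) => fut.await.expect("semaphore is never closed"),
        None => {
            // Slow path: create the per-server semaphore, then wait on it.
            let sem = Arc::clone(
                limiter
                    .write()
                    .unwrap()
                    .entry(origin.to_owned())
                    .or_insert_with(|| Arc::new(Semaphore::new(1))),
            );
            sem.acquire_owned().await.expect("semaphore is never closed")
        }
    }
}

#[tokio::main]
async fn main() {
    let limiter: ServerLimiter = Arc::default();
    let _permit = acquire_for(&limiter, "example.org").await;
    // While _permit is held, a second acquire_for(&limiter, "example.org") would wait.
}
```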
pub async fn fetch_required_signing_keys(
event: &BTreeMap<String, CanonicalJsonValue>,
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, String>>>,
@ -2208,9 +2343,8 @@ pub async fn fetch_required_signing_keys(
"Invalid signatures content object in server response pdu.",
))?;
let signature_ids = signature_object.keys().collect::<Vec<_>>();
let signature_ids = signature_object.keys().cloned().collect::<Vec<_>>();
debug!("Fetching signing keys for {}", signature_server);
let fetch_res = fetch_signing_keys(
db,
&Box::<ServerName>::try_from(&**signature_server).map_err(|_| {