improvement: pdu cache, /sync cache

Timo Kösters 2021-06-30 09:52:01 +02:00
parent dcac1361ec
commit 05821d6fd5
No known key found for this signature in database
GPG key ID: 24DA7517711A2BA4
16 changed files with 424 additions and 243 deletions
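Two caches drive this commit: PDUs are now handed out from a shared in-memory cache (so call sites get a shared `Arc<PduEvent>` and have to `.clone()` event content instead of moving it out), and /sync responses are computed once per (user, device, since) and shared between concurrent requests over a `tokio::sync::watch` channel. The PDU cache itself lives in files not shown in this excerpt; a rough sketch of the shape of the /sync cache implied by the handler code in src/client_server/sync.rs below, with placeholder type aliases rather than the exact types from the commit:

    use std::collections::BTreeMap;
    use std::sync::RwLock;
    use tokio::sync::watch;

    // Placeholder aliases for illustration; the real code uses ruma's UserId /
    // DeviceId and ConduitResult<sync_events::Response>.
    type UserId = String;
    type DeviceId = String;
    type SyncResult = Result<String, String>;

    // One entry per (user, device). The first request for a given `since` token
    // spawns a worker and stores the receiver; identical concurrent requests
    // clone the receiver and await the same result instead of recomputing it.
    struct Globals {
        sync_receivers: RwLock<
            BTreeMap<(UserId, DeviceId), (Option<String>, watch::Receiver<Option<SyncResult>>)>,
        >,
    }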


@@ -200,84 +200,84 @@ pub async fn get_public_rooms_filtered_helper(
         }
     }

-    let mut all_rooms = db
-        .rooms
-        .public_rooms()
-        .map(|room_id| {
-            let room_id = room_id?;
+    let mut all_rooms =
+        db.rooms
+            .public_rooms()
+            .map(|room_id| {
+                let room_id = room_id?;

-            let chunk = PublicRoomsChunk {
-                aliases: Vec::new(),
-                canonical_alias: db
-                    .rooms
-                    .room_state_get(&room_id, &EventType::RoomCanonicalAlias, "")?
-                    .map_or(Ok::<_, Error>(None), |s| {
-                        Ok(
-                            serde_json::from_value::<
+                let chunk = PublicRoomsChunk {
+                    aliases: Vec::new(),
+                    canonical_alias: db
+                        .rooms
+                        .room_state_get(&room_id, &EventType::RoomCanonicalAlias, "")?
+                        .map_or(Ok::<_, Error>(None), |s| {
+                            Ok(serde_json::from_value::<
                                 Raw<canonical_alias::CanonicalAliasEventContent>,
-                            >(s.content)
+                            >(s.content.clone())
                             .expect("from_value::<Raw<..>> can never fail")
                             .deserialize()
                             .map_err(|_| {
                                 Error::bad_database("Invalid canonical alias event in database.")
                             })?
-                            .alias,
-                        )
-                    })?,
-                name: db
-                    .rooms
-                    .room_state_get(&room_id, &EventType::RoomName, "")?
-                    .map_or(Ok::<_, Error>(None), |s| {
-                        Ok(
-                            serde_json::from_value::<Raw<name::NameEventContent>>(s.content)
-                                .expect("from_value::<Raw<..>> can never fail")
-                                .deserialize()
-                                .map_err(|_| {
-                                    Error::bad_database("Invalid room name event in database.")
-                                })?
-                                .name()
-                                .map(|n| n.to_owned()),
-                        )
-                    })?,
-                num_joined_members: (db.rooms.room_members(&room_id).count() as u32).into(),
-                topic: db
-                    .rooms
-                    .room_state_get(&room_id, &EventType::RoomTopic, "")?
-                    .map_or(Ok::<_, Error>(None), |s| {
-                        Ok(Some(
-                            serde_json::from_value::<Raw<topic::TopicEventContent>>(s.content)
+                            .alias)
+                        })?,
+                    name: db
+                        .rooms
+                        .room_state_get(&room_id, &EventType::RoomName, "")?
+                        .map_or(Ok::<_, Error>(None), |s| {
+                            Ok(serde_json::from_value::<Raw<name::NameEventContent>>(
+                                s.content.clone(),
+                            )
+                            .expect("from_value::<Raw<..>> can never fail")
+                            .deserialize()
+                            .map_err(|_| {
+                                Error::bad_database("Invalid room name event in database.")
+                            })?
+                            .name()
+                            .map(|n| n.to_owned()))
+                        })?,
+                    num_joined_members: (db.rooms.room_members(&room_id).count() as u32).into(),
+                    topic: db
+                        .rooms
+                        .room_state_get(&room_id, &EventType::RoomTopic, "")?
+                        .map_or(Ok::<_, Error>(None), |s| {
+                            Ok(Some(
+                                serde_json::from_value::<Raw<topic::TopicEventContent>>(
+                                    s.content.clone(),
+                                )
                                 .expect("from_value::<Raw<..>> can never fail")
                                 .deserialize()
                                 .map_err(|_| {
                                     Error::bad_database("Invalid room topic event in database.")
                                 })?
                                 .topic,
-                        ))
-                    })?,
-                world_readable: db
-                    .rooms
-                    .room_state_get(&room_id, &EventType::RoomHistoryVisibility, "")?
-                    .map_or(Ok::<_, Error>(false), |s| {
-                        Ok(serde_json::from_value::<
-                            Raw<history_visibility::HistoryVisibilityEventContent>,
-                        >(s.content)
-                        .expect("from_value::<Raw<..>> can never fail")
-                        .deserialize()
-                        .map_err(|_| {
-                            Error::bad_database(
-                                "Invalid room history visibility event in database.",
-                            )
-                        })?
-                        .history_visibility
-                            == history_visibility::HistoryVisibility::WorldReadable)
-                    })?,
-                guest_can_join: db
-                    .rooms
-                    .room_state_get(&room_id, &EventType::RoomGuestAccess, "")?
-                    .map_or(Ok::<_, Error>(false), |s| {
-                        Ok(
+                            ))
+                        })?,
+                    world_readable: db
+                        .rooms
+                        .room_state_get(&room_id, &EventType::RoomHistoryVisibility, "")?
+                        .map_or(Ok::<_, Error>(false), |s| {
+                            Ok(serde_json::from_value::<
+                                Raw<history_visibility::HistoryVisibilityEventContent>,
+                            >(s.content.clone())
+                            .expect("from_value::<Raw<..>> can never fail")
+                            .deserialize()
+                            .map_err(|_| {
+                                Error::bad_database(
+                                    "Invalid room history visibility event in database.",
+                                )
+                            })?
+                            .history_visibility
+                                == history_visibility::HistoryVisibility::WorldReadable)
+                        })?,
+                    guest_can_join: db
+                        .rooms
+                        .room_state_get(&room_id, &EventType::RoomGuestAccess, "")?
+                        .map_or(Ok::<_, Error>(false), |s| {
+                            Ok(
                             serde_json::from_value::<Raw<guest_access::GuestAccessEventContent>>(
-                                s.content,
+                                s.content.clone(),
                             )
                             .expect("from_value::<Raw<..>> can never fail")
                             .deserialize()
@@ -287,61 +287,63 @@ pub async fn get_public_rooms_filtered_helper(
                             .guest_access
                                 == guest_access::GuestAccess::CanJoin,
                             )
-                    })?,
-                avatar_url: db
-                    .rooms
-                    .room_state_get(&room_id, &EventType::RoomAvatar, "")?
-                    .map(|s| {
-                        Ok::<_, Error>(
-                            serde_json::from_value::<Raw<avatar::AvatarEventContent>>(s.content)
-                                .expect("from_value::<Raw<..>> can never fail")
-                                .deserialize()
-                                .map_err(|_| {
-                                    Error::bad_database("Invalid room avatar event in database.")
-                                })?
-                                .url,
-                        )
-                    })
-                    .transpose()?
-                    // url is now an Option<String> so we must flatten
-                    .flatten(),
-                room_id,
-            };
-            Ok(chunk)
-        })
-        .filter_map(|r: Result<_>| r.ok()) // Filter out buggy rooms
-        .filter(|chunk| {
-            if let Some(query) = filter
-                .generic_search_term
-                .as_ref()
-                .map(|q| q.to_lowercase())
-            {
-                if let Some(name) = &chunk.name {
-                    if name.to_lowercase().contains(&query) {
-                        return true;
-                    }
-                }
-
-                if let Some(topic) = &chunk.topic {
-                    if topic.to_lowercase().contains(&query) {
-                        return true;
-                    }
-                }
-
-                if let Some(canonical_alias) = &chunk.canonical_alias {
-                    if canonical_alias.as_str().to_lowercase().contains(&query) {
-                        return true;
-                    }
-                }
-
-                false
-            } else {
-                // No search term
-                true
-            }
-        })
-        // We need to collect all, so we can sort by member count
-        .collect::<Vec<_>>();
+                        })?,
+                    avatar_url: db
+                        .rooms
+                        .room_state_get(&room_id, &EventType::RoomAvatar, "")?
+                        .map(|s| {
+                            Ok::<_, Error>(
+                                serde_json::from_value::<Raw<avatar::AvatarEventContent>>(
+                                    s.content.clone(),
+                                )
+                                .expect("from_value::<Raw<..>> can never fail")
+                                .deserialize()
+                                .map_err(|_| {
+                                    Error::bad_database("Invalid room avatar event in database.")
+                                })?
+                                .url,
+                            )
+                        })
+                        .transpose()?
+                        // url is now an Option<String> so we must flatten
+                        .flatten(),
+                    room_id,
+                };
+                Ok(chunk)
+            })
+            .filter_map(|r: Result<_>| r.ok()) // Filter out buggy rooms
+            .filter(|chunk| {
+                if let Some(query) = filter
+                    .generic_search_term
+                    .as_ref()
+                    .map(|q| q.to_lowercase())
+                {
+                    if let Some(name) = &chunk.name {
+                        if name.to_lowercase().contains(&query) {
+                            return true;
+                        }
+                    }
+
+                    if let Some(topic) = &chunk.topic {
+                        if topic.to_lowercase().contains(&query) {
+                            return true;
+                        }
+                    }
+
+                    if let Some(canonical_alias) = &chunk.canonical_alias {
+                        if canonical_alias.as_str().to_lowercase().contains(&query) {
+                            return true;
+                        }
+                    }
+
+                    false
+                } else {
+                    // No search term
+                    true
+                }
+            })
+            // We need to collect all, so we can sort by member count
+            .collect::<Vec<_>>();

     all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members));
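The repeated `s.content` → `s.content.clone()` edits in this file (and in the handlers below) all follow from the PDU cache: state lookups such as `room_state_get` now hand back a shared, cached PDU rather than an owned value (note the `create_event.map(Arc::new)` → `create_event` change in membership.rs), so `content` can no longer be moved out of it. A minimal, self-contained sketch of that borrow situation, using a stand-in `PduEvent` type rather than Conduit's real definition:

    use std::sync::Arc;

    struct PduEvent {
        content: serde_json::Value,
    }

    fn consume(_content: serde_json::Value) {}

    fn example(pdu: Arc<PduEvent>) {
        // Moving `pdu.content` out of the Arc is rejected by the compiler,
        // because other holders of the Arc still need the value intact:
        // consume(pdu.content);         // error[E0507]: cannot move out of an `Arc`
        consume(pdu.content.clone());    // so every call site clones instead
    }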


@@ -189,7 +189,8 @@ pub async fn kick_user_route(
                 ErrorKind::BadState,
                 "Cannot kick member that's not in the room.",
             ))?
-            .content,
+            .content
+            .clone(),
     )
     .expect("Raw::from_value always works")
     .deserialize()
@@ -245,11 +246,12 @@ pub async fn ban_user_route(
                 third_party_invite: None,
             }),
            |event| {
-                let mut event =
-                    serde_json::from_value::<Raw<member::MemberEventContent>>(event.content)
-                        .expect("Raw::from_value always works")
-                        .deserialize()
-                        .map_err(|_| Error::bad_database("Invalid member event in database."))?;
+                let mut event = serde_json::from_value::<Raw<member::MemberEventContent>>(
+                    event.content.clone(),
+                )
+                .expect("Raw::from_value always works")
+                .deserialize()
+                .map_err(|_| Error::bad_database("Invalid member event in database."))?;
                 event.membership = ruma::events::room::member::MembershipState::Ban;
                 Ok(event)
             },
@@ -295,7 +297,8 @@ pub async fn unban_user_route(
                 ErrorKind::BadState,
                 "Cannot unban a user who is not banned.",
             ))?
-            .content,
+            .content
+            .clone(),
     )
     .expect("from_value::<Raw<..>> can never fail")
     .deserialize()
@@ -753,7 +756,7 @@ pub async fn invite_helper(
     let create_prev_event = if prev_events.len() == 1
         && Some(&prev_events[0]) == create_event.as_ref().map(|c| &c.event_id)
     {
-        create_event.map(Arc::new)
+        create_event
     } else {
         None
     };
@@ -792,10 +795,10 @@ pub async fn invite_helper(
         let mut unsigned = BTreeMap::new();

         if let Some(prev_pdu) = db.rooms.room_state_get(room_id, &kind, &state_key)? {
-            unsigned.insert("prev_content".to_owned(), prev_pdu.content);
+            unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone());
             unsigned.insert(
                 "prev_sender".to_owned(),
-                serde_json::to_value(prev_pdu.sender).expect("UserId::to_value always works"),
+                serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"),
             );
         }


@@ -53,7 +53,8 @@ pub async fn set_displayname_route(
                     room.",
                 )
             })?
-            .content,
+            .content
+            .clone(),
     )
     .expect("from_value::<Raw<..>> can never fail")
     .deserialize()
@@ -154,7 +155,8 @@ pub async fn set_avatar_url_route(
                     room.",
                 )
             })?
-            .content,
+            .content
+            .clone(),
     )
     .expect("from_value::<Raw<..>> can never fail")
     .deserialize()


@@ -362,7 +362,8 @@ pub async fn upgrade_room_route(
         db.rooms
             .room_state_get(&body.room_id, &EventType::RoomCreate, "")?
             .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
-            .content,
+            .content
+            .clone(),
     )
     .expect("Raw::from_value always works")
     .deserialize()
@@ -463,7 +464,8 @@ pub async fn upgrade_room_route(
         db.rooms
             .room_state_get(&body.room_id, &EventType::RoomPowerLevels, "")?
             .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
-            .content,
+            .content
+            .clone(),
     )
     .expect("database contains invalid PDU")
     .deserialize()


@@ -92,7 +92,7 @@ pub async fn get_state_events_route(
         db.rooms
             .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")?
             .map(|event| {
-                serde_json::from_value::<HistoryVisibilityEventContent>(event.content)
+                serde_json::from_value::<HistoryVisibilityEventContent>(event.content.clone())
                     .map_err(|_| {
                         Error::bad_database(
                             "Invalid room history visibility event in database.",
@@ -139,7 +139,7 @@ pub async fn get_state_events_for_key_route(
         db.rooms
             .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")?
             .map(|event| {
-                serde_json::from_value::<HistoryVisibilityEventContent>(event.content)
+                serde_json::from_value::<HistoryVisibilityEventContent>(event.content.clone())
                     .map_err(|_| {
                         Error::bad_database(
                             "Invalid room history visibility event in database.",
@@ -165,7 +165,7 @@ pub async fn get_state_events_for_key_route(
         ))?;

     Ok(get_state_events_for_key::Response {
-        content: serde_json::from_value(event.content)
+        content: serde_json::from_value(event.content.clone())
             .map_err(|_| Error::bad_database("Invalid event content in database"))?,
     }
     .into())
@@ -190,7 +190,7 @@ pub async fn get_state_events_for_empty_key_route(
         db.rooms
             .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")?
             .map(|event| {
-                serde_json::from_value::<HistoryVisibilityEventContent>(event.content)
+                serde_json::from_value::<HistoryVisibilityEventContent>(event.content.clone())
                     .map_err(|_| {
                         Error::bad_database(
                             "Invalid room history visibility event in database.",
@@ -216,7 +216,7 @@ pub async fn get_state_events_for_empty_key_route(
         ))?;

     Ok(get_state_events_for_key::Response {
-        content: serde_json::from_value(event.content)
+        content: serde_json::from_value(event.content.clone())
             .map_err(|_| Error::bad_database("Invalid event content in database"))?,
     }
     .into())


@@ -1,21 +1,22 @@
 use super::State;
-use crate::{ConduitResult, Database, Error, Result, Ruma};
+use crate::{ConduitResult, Database, Error, Result, Ruma, RumaResponse};
 use log::error;
 use ruma::{
-    api::client::r0::sync::sync_events,
+    api::client::r0::{sync::sync_events, uiaa::UiaaResponse},
     events::{room::member::MembershipState, AnySyncEphemeralRoomEvent, EventType},
     serde::Raw,
-    RoomId, UserId,
+    DeviceId, RoomId, UserId,
 };
-#[cfg(feature = "conduit_bin")]
-use rocket::{get, tokio};
 use std::{
-    collections::{hash_map, BTreeMap, HashMap, HashSet},
+    collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet},
     convert::{TryFrom, TryInto},
     sync::Arc,
     time::Duration,
 };
+use tokio::sync::watch::Sender;
+
+#[cfg(feature = "conduit_bin")]
+use rocket::{get, tokio};

 /// # `GET /_matrix/client/r0/sync`
 ///
@@ -36,21 +37,134 @@
 pub async fn sync_events_route(
     db: State<'_, Arc<Database>>,
     body: Ruma<sync_events::Request<'_>>,
-) -> ConduitResult<sync_events::Response> {
+) -> std::result::Result<RumaResponse<sync_events::Response>, RumaResponse<UiaaResponse>> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
     let sender_device = body.sender_device.as_ref().expect("user is authenticated");
+
+    let mut rx = match db
+        .globals
+        .sync_receivers
+        .write()
+        .unwrap()
+        .entry((sender_user.clone(), sender_device.clone()))
+    {
+        Entry::Vacant(v) => {
+            let (tx, rx) = tokio::sync::watch::channel(None);
+
+            tokio::spawn(sync_helper_wrapper(
+                Arc::clone(&db),
+                sender_user.clone(),
+                sender_device.clone(),
+                body.since.clone(),
+                body.full_state,
+                body.timeout,
+                tx,
+            ));
+
+            v.insert((body.since.clone(), rx)).1.clone()
+        }
+        Entry::Occupied(mut o) => {
+            if o.get().0 != body.since {
+                let (tx, rx) = tokio::sync::watch::channel(None);
+
+                tokio::spawn(sync_helper_wrapper(
+                    Arc::clone(&db),
+                    sender_user.clone(),
+                    sender_device.clone(),
+                    body.since.clone(),
+                    body.full_state,
+                    body.timeout,
+                    tx,
+                ));
+
+                o.insert((body.since.clone(), rx.clone()));
+
+                rx
+            } else {
+                o.get().1.clone()
+            }
+        }
+    };
+
+    let we_have_to_wait = rx.borrow().is_none();
+    if we_have_to_wait {
+        let _ = rx.changed().await;
+    }
+
+    let result = match rx
+        .borrow()
+        .as_ref()
+        .expect("When sync channel changes it's always set to some")
+    {
+        Ok(response) => Ok(response.clone()),
+        Err(error) => Err(error.to_response()),
+    };
+
+    result
+}
+
+pub async fn sync_helper_wrapper(
+    db: Arc<Database>,
+    sender_user: UserId,
+    sender_device: Box<DeviceId>,
+    since: Option<String>,
+    full_state: bool,
+    timeout: Option<Duration>,
+    tx: Sender<Option<ConduitResult<sync_events::Response>>>,
+) {
+    let r = sync_helper(
+        Arc::clone(&db),
+        sender_user.clone(),
+        sender_device.clone(),
+        since.clone(),
+        full_state,
+        timeout,
+    )
+    .await;
+
+    if let Ok((_, caching_allowed)) = r {
+        if !caching_allowed {
+            match db
+                .globals
+                .sync_receivers
+                .write()
+                .unwrap()
+                .entry((sender_user, sender_device))
+            {
+                Entry::Occupied(o) => {
+                    // Only remove if the device didn't start a different /sync already
+                    if o.get().0 == since {
+                        o.remove();
+                    }
+                }
+                Entry::Vacant(_) => {}
+            }
+        }
+    }
+
+    let _ = tx.send(Some(r.map(|(r, _)| r.into())));
+}
+
+async fn sync_helper(
+    db: Arc<Database>,
+    sender_user: UserId,
+    sender_device: Box<DeviceId>,
+    since: Option<String>,
+    full_state: bool,
+    timeout: Option<Duration>,
+    // bool = caching allowed
+) -> std::result::Result<(sync_events::Response, bool), Error> {
     // TODO: match body.set_presence {
     db.rooms.edus.ping_presence(&sender_user)?;

     // Setup watchers, so if there's no response, we can wait for them
-    let watcher = db.watch(sender_user, sender_device);
+    let watcher = db.watch(&sender_user, &sender_device);

-    let next_batch = db.globals.current_count()?.to_string();
+    let next_batch = db.globals.current_count()?;
+    let next_batch_string = next_batch.to_string();

     let mut joined_rooms = BTreeMap::new();
-    let since = body
-        .since
+    let since = since
         .clone()
         .and_then(|string| string.parse().ok())
         .unwrap_or(0);
@ -114,10 +228,11 @@ pub async fn sync_events_route(
// since and the current room state, meaning there should be no updates.
// The inner Option is None when there is an event, but there is no state hash associated
// with it. This can happen for the RoomCreate event, so all updates should arrive.
let first_pdu_before_since = db.rooms.pdus_until(sender_user, &room_id, since).next();
let first_pdu_before_since = db.rooms.pdus_until(&sender_user, &room_id, since).next();
let pdus_after_since = db
.rooms
.pdus_after(sender_user, &room_id, since)
.pdus_after(&sender_user, &room_id, since)
.next()
.is_some();
@@ -256,11 +371,11 @@
                         .flatten()
                         .filter(|user_id| {
                             // Don't send key updates from the sender to the sender
-                            sender_user != user_id
+                            &sender_user != user_id
                         })
                         .filter(|user_id| {
                             // Only send keys if the sender doesn't share an encrypted room with the target already
-                            !share_encrypted_room(&db, sender_user, user_id, &room_id)
+                            !share_encrypted_room(&db, &sender_user, user_id, &room_id)
                                 .unwrap_or(false)
                         }),
                 );
@@ -335,7 +450,7 @@
             let state_events = if joined_since_last_sync {
                 current_state
-                    .into_iter()
+                    .iter()
                     .map(|(_, pdu)| pdu.to_sync_state_event())
                     .collect()
             } else {
@@ -520,7 +635,7 @@
                 account_data: sync_events::RoomAccountData { events: Vec::new() },
                 timeline: sync_events::Timeline {
                     limited: false,
-                    prev_batch: Some(next_batch.clone()),
+                    prev_batch: Some(next_batch_string.clone()),
                     events: Vec::new(),
                 },
                 state: sync_events::State {
@@ -573,10 +688,10 @@
     // Remove all to-device events the device received *last time*
     db.users
-        .remove_to_device_events(sender_user, sender_device, since)?;
+        .remove_to_device_events(&sender_user, &sender_device, since)?;

     let response = sync_events::Response {
-        next_batch,
+        next_batch: next_batch_string,
         rooms: sync_events::Rooms {
             leave: left_rooms,
             join: joined_rooms,
@@ -604,20 +719,22 @@
             changed: device_list_updates.into_iter().collect(),
             left: device_list_left.into_iter().collect(),
         },
-        device_one_time_keys_count: if db.users.last_one_time_keys_update(sender_user)? > since
+        device_one_time_keys_count: if db.users.last_one_time_keys_update(&sender_user)? > since
             || since == 0
         {
-            db.users.count_one_time_keys(sender_user, sender_device)?
+            db.users.count_one_time_keys(&sender_user, &sender_device)?
         } else {
             BTreeMap::new()
         },
         to_device: sync_events::ToDevice {
-            events: db.users.get_to_device_events(sender_user, sender_device)?,
+            events: db
+                .users
+                .get_to_device_events(&sender_user, &sender_device)?,
         },
     };

     // TODO: Retry the endpoint instead of returning (waiting for #118)
-    if !body.full_state
+    if !full_state
         && response.rooms.is_empty()
         && response.presence.is_empty()
         && response.account_data.is_empty()
@@ -627,14 +744,15 @@
     {
         // Hang a few seconds so requests are not spammed
         // Stop hanging if new info arrives
-        let mut duration = body.timeout.unwrap_or_default();
+        let mut duration = timeout.unwrap_or_default();
         if duration.as_secs() > 30 {
             duration = Duration::from_secs(30);
         }
         let _ = tokio::time::timeout(duration, watcher).await;
+        Ok((response, false))
+    } else {
+        Ok((response, since != next_batch)) // Only cache if we made progress
     }
-
-    Ok(response.into())
 }

 #[tracing::instrument(skip(db))]
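On the consumer side, a second /sync request with the same `since` token never recomputes the response: it finds the stored receiver, waits for the watch channel to be filled if necessary, and clones the published result. `sync_helper` additionally reports whether caching is allowed (`since != next_batch`, i.e. only when the response actually made progress), and `sync_helper_wrapper` drops the cache entry again when it is not. A generic sketch of that consumer pattern, assuming only tokio's watch channel (names are illustrative, not from the commit):

    use tokio::sync::watch;

    /// Wait until a shared worker publishes `Some(result)`, then clone it.
    async fn await_shared<T: Clone>(mut rx: watch::Receiver<Option<T>>) -> T {
        // Mirrors the handler above: check first, then wait for a change.
        let must_wait = rx.borrow().is_none();
        if must_wait {
            let _ = rx.changed().await;
        }
        rx.borrow()
            .as_ref()
            .expect("value is set whenever the channel changes")
            .clone()
    }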