Merge branch 'next' into 'room-v11'

# Conflicts:
#   src/service/rooms/timeline/mod.rs
#   src/utils/error.rs
Commit b5e21f761b by Val Lorentz, 2024-04-11 17:34:42 +00:00
109 changed files with 4239 additions and 3708 deletions

View file

@ -1,13 +1,14 @@
use std::{
collections::BTreeMap,
convert::{TryFrom, TryInto},
sync::{Arc, RwLock},
sync::Arc,
time::Instant,
};
use clap::Parser;
use regex::Regex;
use ruma::{
api::appservice::Registration,
events::{
room::{
canonical_alias::RoomCanonicalAliasEventContent,
@ -23,10 +24,10 @@ use ruma::{
},
TimelineEventType,
},
EventId, OwnedRoomAliasId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId,
EventId, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId,
};
use serde_json::value::to_raw_value;
use tokio::sync::{mpsc, Mutex, MutexGuard};
use tokio::sync::{mpsc, Mutex, RwLock};
use crate::{
api::client_server::{leave_all_rooms, AUTO_GEN_PASSWORD_LENGTH},
@ -50,7 +51,7 @@ enum AdminCommand {
/// Registering a new bridge using the ID of an existing bridge will replace
/// the old one.
///
/// [commandbody]
/// [commandbody]()
/// # ```
/// # yaml content here
/// # ```
@ -96,7 +97,7 @@ enum AdminCommand {
/// Removing a large number of users from a room may generate a significant number of leave events.
/// The time needed to leave rooms can vary considerably with the rooms and servers involved.
///
/// [commandbody]
/// [commandbody]()
/// # ```
/// # User list here
/// # ```
@ -121,7 +122,7 @@ enum AdminCommand {
/// The PDU event is only checked for validity and is not added to the
/// database.
///
/// [commandbody]
/// [commandbody]()
/// # ```
/// # PDU json content here
/// # ```
@ -165,14 +166,14 @@ enum AdminCommand {
EnableRoom { room_id: Box<RoomId> },
/// Sign a JSON object
/// [commandbody]
/// [commandbody]()
/// # ```
/// # json here
/// # ```
SignJson,
/// Verify json signatures
/// [commandbody]
/// [commandbody]()
/// # ```
/// # json here
/// # ```
@ -214,60 +215,44 @@ impl Service {
let conduit_user = UserId::parse(format!("@conduit:{}", services().globals.server_name()))
.expect("@conduit:server_name is valid");
let conduit_room = services()
.rooms
.alias
.resolve_local_alias(
format!("#admins:{}", services().globals.server_name())
.as_str()
.try_into()
.expect("#admins:server_name is a valid room alias"),
)
.expect("Database data for admin room alias must be valid")
.expect("Admin room must exist");
if let Ok(Some(conduit_room)) = services().admin.get_admin_room() {
loop {
tokio::select! {
Some(event) = receiver.recv() => {
let message_content = match event {
AdminRoomEvent::SendMessage(content) => content,
AdminRoomEvent::ProcessMessage(room_message) => self.process_admin_message(room_message).await
};
let send_message = |message: RoomMessageEventContent, mutex_lock: &MutexGuard<'_, ()>| {
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMessage,
content: to_raw_value(&message)
.expect("event is valid, we just created it"),
unsigned: None,
state_key: None,
redacts: None,
},
&conduit_user,
&conduit_room,
mutex_lock,
)
.unwrap();
};
let mutex_state = Arc::clone(
services().globals
.roomid_mutex_state
.write()
.await
.entry(conduit_room.to_owned())
.or_default(),
);
loop {
tokio::select! {
Some(event) = receiver.recv() => {
let message_content = match event {
AdminRoomEvent::SendMessage(content) => content,
AdminRoomEvent::ProcessMessage(room_message) => self.process_admin_message(room_message).await
};
let state_lock = mutex_state.lock().await;
let mutex_state = Arc::clone(
services().globals
.roomid_mutex_state
.write()
.unwrap()
.entry(conduit_room.to_owned())
.or_default(),
);
let state_lock = mutex_state.lock().await;
send_message(message_content, &state_lock);
drop(state_lock);
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMessage,
content: to_raw_value(&message_content)
.expect("event is valid, we just created it"),
unsigned: None,
state_key: None,
redacts: None,
},
&conduit_user,
&conduit_room,
&state_lock,
)
.await.unwrap();
}
}
}
}
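A note on the locking pattern above: both the old and new admin loops serialize room writes through a per-room state mutex pulled from a shared map. A minimal standalone sketch of that pattern, assuming the map shape implied by the diff (one Arc<Mutex<()>> per room ID; names are illustrative):

use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::{Mutex, RwLock};

#[tokio::main]
async fn main() {
    // Assumed shape of globals.roomid_mutex_state.
    let roomid_mutex_state: RwLock<HashMap<String, Arc<Mutex<()>>>> =
        RwLock::new(HashMap::new());

    // Clone the per-room mutex out of the map, then hold its guard while
    // appending PDUs so concurrent writers to the same room are serialized.
    let mutex = Arc::clone(
        roomid_mutex_state
            .write()
            .await
            .entry("!admins:example.org".to_owned())
            .or_default(),
    );
    let _state_lock = mutex.lock().await;
}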
@ -351,10 +336,9 @@ impl Service {
if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```"
{
let appservice_config = body[1..body.len() - 1].join("\n");
let parsed_config =
serde_yaml::from_str::<serde_yaml::Value>(&appservice_config);
let parsed_config = serde_yaml::from_str::<Registration>(&appservice_config);
match parsed_config {
Ok(yaml) => match services().appservice.register_appservice(yaml) {
Ok(yaml) => match services().appservice.register_appservice(yaml).await {
Ok(id) => RoomMessageEventContent::text_plain(format!(
"Appservice registered with ID: {id}."
)),
@ -377,6 +361,7 @@ impl Service {
} => match services()
.appservice
.unregister_appservice(&appservice_identifier)
.await
{
Ok(()) => RoomMessageEventContent::text_plain("Appservice unregistered."),
Err(e) => RoomMessageEventContent::text_plain(format!(
@ -384,25 +369,13 @@ impl Service {
)),
},
AdminCommand::ListAppservices => {
if let Ok(appservices) = services()
.appservice
.iter_ids()
.map(|ids| ids.collect::<Vec<_>>())
{
let count = appservices.len();
let output = format!(
"Appservices ({}): {}",
count,
appservices
.into_iter()
.filter_map(|r| r.ok())
.collect::<Vec<_>>()
.join(", ")
);
RoomMessageEventContent::text_plain(output)
} else {
RoomMessageEventContent::text_plain("Failed to get appservices.")
}
let appservices = services().appservice.iter_ids().await;
let output = format!(
"Appservices ({}): {}",
appservices.len(),
appservices.join(", ")
);
RoomMessageEventContent::text_plain(output)
}
AdminCommand::ListRooms => {
let room_ids = services().rooms.metadata.iter_ids();
@ -434,11 +407,7 @@ impl Service {
Err(e) => RoomMessageEventContent::text_plain(e.to_string()),
},
AdminCommand::IncomingFederation => {
let map = services()
.globals
.roomid_federationhandletime
.read()
.unwrap();
let map = services().globals.roomid_federationhandletime.read().await;
let mut msg: String = format!("Handling {} incoming pdus:\n", map.len());
for (r, (e, i)) in map.iter() {
@ -552,7 +521,7 @@ impl Service {
}
}
AdminCommand::MemoryUsage => {
let response1 = services().memory_usage();
let response1 = services().memory_usage().await;
let response2 = services().globals.db.memory_usage();
RoomMessageEventContent::text_plain(format!(
@ -565,7 +534,7 @@ impl Service {
RoomMessageEventContent::text_plain("Done.")
}
AdminCommand::ClearServiceCaches { amount } => {
services().clear_caches(amount);
services().clear_caches(amount).await;
RoomMessageEventContent::text_plain("Done.")
}
@ -586,6 +555,13 @@ impl Service {
}
};
// Checks if user is local
if user_id.server_name() != services().globals.server_name() {
return Ok(RoomMessageEventContent::text_plain(
"The specified user is not from this server!",
));
};
// Check if the specified user is valid
if !services().users.exists(&user_id)?
|| user_id
@ -689,7 +665,15 @@ impl Service {
user_id,
} => {
let user_id = Arc::<UserId>::from(user_id);
if services().users.exists(&user_id)? {
if !services().users.exists(&user_id)? {
RoomMessageEventContent::text_plain(format!(
"User {user_id} doesn't exist on this server"
))
} else if user_id.server_name() != services().globals.server_name() {
RoomMessageEventContent::text_plain(format!(
"User {user_id} is not from this server"
))
} else {
RoomMessageEventContent::text_plain(format!(
"Making {user_id} leave all rooms before deactivation..."
));
@ -703,30 +687,76 @@ impl Service {
RoomMessageEventContent::text_plain(format!(
"User {user_id} has been deactivated"
))
} else {
RoomMessageEventContent::text_plain(format!(
"User {user_id} doesn't exist on this server"
))
}
}
AdminCommand::DeactivateAll { leave_rooms, force } => {
if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```"
{
let usernames = body.clone().drain(1..body.len() - 1).collect::<Vec<_>>();
let users = body.clone().drain(1..body.len() - 1).collect::<Vec<_>>();
let mut user_ids: Vec<&UserId> = Vec::new();
let mut user_ids = Vec::new();
let mut remote_ids = Vec::new();
let mut non_existant_ids = Vec::new();
let mut invalid_users = Vec::new();
for &username in &usernames {
match <&UserId>::try_from(username) {
Ok(user_id) => user_ids.push(user_id),
for &user in &users {
match <&UserId>::try_from(user) {
Ok(user_id) => {
if user_id.server_name() != services().globals.server_name() {
remote_ids.push(user_id)
} else if !services().users.exists(user_id)? {
non_existant_ids.push(user_id)
} else {
user_ids.push(user_id)
}
}
Err(_) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"{username} is not a valid username"
)))
invalid_users.push(user);
}
}
}
let mut markdown_message = String::new();
let mut html_message = String::new();
if !invalid_users.is_empty() {
markdown_message.push_str("The following user ids are not valid:\n```\n");
html_message.push_str("The following user ids are not valid:\n<pre>\n");
for invalid_user in invalid_users {
markdown_message.push_str(&format!("{invalid_user}\n"));
html_message.push_str(&format!("{invalid_user}\n"));
}
markdown_message.push_str("```\n\n");
html_message.push_str("</pre>\n\n");
}
if !remote_ids.is_empty() {
markdown_message
.push_str("The following users are not from this server:\n```\n");
html_message
.push_str("The following users are not from this server:\n<pre>\n");
for remote_id in remote_ids {
markdown_message.push_str(&format!("{remote_id}\n"));
html_message.push_str(&format!("{remote_id}\n"));
}
markdown_message.push_str("```\n\n");
html_message.push_str("</pre>\n\n");
}
if !non_existant_ids.is_empty() {
markdown_message.push_str("The following users do not exist:\n```\n");
html_message.push_str("The following users do not exist:\n<pre>\n");
for non_existant_id in non_existant_ids {
markdown_message.push_str(&format!("{non_existant_id}\n"));
html_message.push_str(&format!("{non_existant_id}\n"));
}
markdown_message.push_str("```\n\n");
html_message.push_str("</pre>\n\n");
}
if !markdown_message.is_empty() {
return Ok(RoomMessageEventContent::text_html(
markdown_message,
html_message,
));
}
let mut deactivation_count = 0;
let mut admins = Vec::new();
@ -806,7 +836,7 @@ impl Service {
.fetch_required_signing_keys(&value, &pub_key_map)
.await?;
let pub_key_map = pub_key_map.read().unwrap();
let pub_key_map = pub_key_map.read().await;
match ruma::signatures::verify_json(&pub_key_map, &value) {
Ok(_) => RoomMessageEventContent::text_plain("Signature correct"),
Err(e) => RoomMessageEventContent::text_plain(format!(
@ -858,12 +888,15 @@ impl Service {
.expect("Regex compilation should not fail");
let text = re.replace_all(&text, "<code>$1</code>: $4");
// Look for a `[commandbody]` tag. If it exists, use all lines below it that
// Look for a `[commandbody]()` tag. If it exists, use all lines below it that
// start with a `#` in the USAGE section.
let mut text_lines: Vec<&str> = text.lines().collect();
let mut command_body = String::new();
if let Some(line_index) = text_lines.iter().position(|line| *line == "[commandbody]") {
if let Some(line_index) = text_lines
.iter()
.position(|line| *line == "[commandbody]()")
{
text_lines.remove(line_index);
while text_lines
@ -919,7 +952,7 @@ impl Service {
.globals
.roomid_mutex_state
.write()
.unwrap()
.await
.entry(room_id.clone())
.or_default(),
);
@ -952,170 +985,223 @@ impl Service {
content.room_version = room_version;
// 1. The room create event
services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomCreate,
content: to_raw_value(&content).expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)?;
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomCreate,
content: to_raw_value(&content).expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)
.await?;
// 2. Make conduit bot join
services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
membership: MembershipState::Join,
displayname: None,
avatar_url: None,
is_direct: None,
third_party_invite: None,
blurhash: None,
reason: None,
join_authorized_via_users_server: None,
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(conduit_user.to_string()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)?;
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
membership: MembershipState::Join,
displayname: None,
avatar_url: None,
is_direct: None,
third_party_invite: None,
blurhash: None,
reason: None,
join_authorized_via_users_server: None,
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(conduit_user.to_string()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)
.await?;
// 3. Power levels
let mut users = BTreeMap::new();
users.insert(conduit_user.clone(), 100.into());
services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomPowerLevels,
content: to_raw_value(&RoomPowerLevelsEventContent {
users,
..Default::default()
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)?;
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomPowerLevels,
content: to_raw_value(&RoomPowerLevelsEventContent {
users,
..Default::default()
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)
.await?;
// 4.1 Join Rules
services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomJoinRules,
content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)?;
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomJoinRules,
content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)
.await?;
// 4.2 History Visibility
services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomHistoryVisibility,
content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
HistoryVisibility::Shared,
))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)?;
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomHistoryVisibility,
content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
HistoryVisibility::Shared,
))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)
.await?;
// 4.3 Guest Access
services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomGuestAccess,
content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden))
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomGuestAccess,
content: to_raw_value(&RoomGuestAccessEventContent::new(
GuestAccess::Forbidden,
))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)?;
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)
.await?;
// 5. Events implied by name and topic
let room_name = format!("{} Admin Room", services().globals.server_name());
services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomName,
content: to_raw_value(&RoomNameEventContent::new(Some(room_name)))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)?;
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomName,
content: to_raw_value(&RoomNameEventContent::new(room_name))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)
.await?;
services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomTopic,
content: to_raw_value(&RoomTopicEventContent {
topic: format!("Manage {}", services().globals.server_name()),
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)?;
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomTopic,
content: to_raw_value(&RoomTopicEventContent {
topic: format!("Manage {}", services().globals.server_name()),
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)
.await?;
// 6. Room alias
let alias: OwnedRoomAliasId = format!("#admins:{}", services().globals.server_name())
.try_into()
.expect("#admins:server_name is a valid alias name");
services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomCanonicalAlias,
content: to_raw_value(&RoomCanonicalAliasEventContent {
alias: Some(alias.clone()),
alt_aliases: Vec::new(),
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)?;
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomCanonicalAlias,
content: to_raw_value(&RoomCanonicalAliasEventContent {
alias: Some(alias.clone()),
alt_aliases: Vec::new(),
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)
.await?;
services().rooms.alias.set_alias(&alias, &room_id)?;
Ok(())
}
/// Gets the room ID of the admin room
///
/// Errors are propagated from the database; the result is `None` if there is no admin room
pub(crate) fn get_admin_room(&self) -> Result<Option<OwnedRoomId>> {
let admin_room_alias: Box<RoomAliasId> =
format!("#admins:{}", services().globals.server_name())
.try_into()
.expect("#admins:server_name is a valid alias name");
services()
.rooms
.alias
.resolve_local_alias(&admin_room_alias)
}
/// Invite the user to the conduit admin room.
///
/// In conduit, this is equivalent to granting admin privileges.
@ -1124,102 +1210,105 @@ impl Service {
user_id: &UserId,
displayname: String,
) -> Result<()> {
let admin_room_alias: Box<RoomAliasId> =
format!("#admins:{}", services().globals.server_name())
.try_into()
.expect("#admins:server_name is a valid alias name");
let room_id = services()
.rooms
.alias
.resolve_local_alias(&admin_room_alias)?
.expect("Admin room must exist");
if let Some(room_id) = services().admin.get_admin_room()? {
let mutex_state = Arc::clone(
services()
.globals
.roomid_mutex_state
.write()
.await
.entry(room_id.clone())
.or_default(),
);
let state_lock = mutex_state.lock().await;
let mutex_state = Arc::clone(
// Use the server user to grant the new admin's power level
let conduit_user =
UserId::parse_with_server_name("conduit", services().globals.server_name())
.expect("@conduit:server_name is valid");
// Invite and join the real user
services()
.globals
.roomid_mutex_state
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let state_lock = mutex_state.lock().await;
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
membership: MembershipState::Invite,
displayname: None,
avatar_url: None,
is_direct: None,
third_party_invite: None,
blurhash: None,
reason: None,
join_authorized_via_users_server: None,
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(user_id.to_string()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)
.await?;
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
membership: MembershipState::Join,
displayname: Some(displayname),
avatar_url: None,
is_direct: None,
third_party_invite: None,
blurhash: None,
reason: None,
join_authorized_via_users_server: None,
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(user_id.to_string()),
redacts: None,
},
user_id,
&room_id,
&state_lock,
)
.await?;
// Use the server user to grant the new admin's power level
let conduit_user =
UserId::parse_with_server_name("conduit", services().globals.server_name())
.expect("@conduit:server_name is valid");
// Set power level
let mut users = BTreeMap::new();
users.insert(conduit_user.to_owned(), 100.into());
users.insert(user_id.to_owned(), 100.into());
// Invite and join the real user
services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
membership: MembershipState::Invite,
displayname: None,
avatar_url: None,
is_direct: None,
third_party_invite: None,
blurhash: None,
reason: None,
join_authorized_via_users_server: None,
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(user_id.to_string()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)?;
services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
membership: MembershipState::Join,
displayname: Some(displayname),
avatar_url: None,
is_direct: None,
third_party_invite: None,
blurhash: None,
reason: None,
join_authorized_via_users_server: None,
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(user_id.to_string()),
redacts: None,
},
user_id,
&room_id,
&state_lock,
)?;
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomPowerLevels,
content: to_raw_value(&RoomPowerLevelsEventContent {
users,
..Default::default()
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)
.await?;
// Set power level
let mut users = BTreeMap::new();
users.insert(conduit_user.to_owned(), 100.into());
users.insert(user_id.to_owned(), 100.into());
services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomPowerLevels,
content: to_raw_value(&RoomPowerLevelsEventContent {
users,
..Default::default()
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)?;
// Send welcome message
services().rooms.timeline.build_and_append_pdu(
// Send welcome message
services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMessage,
content: to_raw_value(&RoomMessageEventContent::text_html(
@ -1234,8 +1323,8 @@ impl Service {
&conduit_user,
&room_id,
&state_lock,
)?;
).await?;
}
Ok(())
}
}

View file

@ -1,8 +1,10 @@
use ruma::api::appservice::Registration;
use crate::Result;
pub trait Data: Send + Sync {
/// Registers an appservice and returns the ID to the caller
fn register_appservice(&self, yaml: serde_yaml::Value) -> Result<String>;
fn register_appservice(&self, yaml: Registration) -> Result<String>;
/// Remove an appservice registration
///
@ -11,9 +13,9 @@ pub trait Data: Send + Sync {
/// * `service_name` - the name you send to register the service previously
fn unregister_appservice(&self, service_name: &str) -> Result<()>;
fn get_registration(&self, id: &str) -> Result<Option<serde_yaml::Value>>;
fn get_registration(&self, id: &str) -> Result<Option<Registration>>;
fn iter_ids<'a>(&'a self) -> Result<Box<dyn Iterator<Item = Result<String>> + 'a>>;
fn all(&self) -> Result<Vec<(String, serde_yaml::Value)>>;
fn all(&self) -> Result<Vec<(String, Registration)>>;
}

View file

@ -1,37 +1,184 @@
mod data;
use std::collections::BTreeMap;
pub use data::Data;
use crate::Result;
use futures_util::Future;
use regex::RegexSet;
use ruma::api::appservice::{Namespace, Registration};
use tokio::sync::RwLock;
use crate::{services, Result};
/// Compiled regular expressions for a namespace.
#[derive(Clone, Debug)]
pub struct NamespaceRegex {
pub exclusive: Option<RegexSet>,
pub non_exclusive: Option<RegexSet>,
}
impl NamespaceRegex {
/// Checks whether this namespace matches the given value (exclusively or not)
pub fn is_match(&self, heystack: &str) -> bool {
if self.is_exclusive_match(heystack) {
return true;
}
if let Some(non_exclusive) = &self.non_exclusive {
if non_exclusive.is_match(heystack) {
return true;
}
}
false
}
/// Checks whether this namespace has an exclusive match for the given value
pub fn is_exclusive_match(&self, heystack: &str) -> bool {
if let Some(exclusive) = &self.exclusive {
if exclusive.is_match(heystack) {
return true;
}
}
false
}
}
impl TryFrom<Vec<Namespace>> for NamespaceRegex {
fn try_from(value: Vec<Namespace>) -> Result<Self, regex::Error> {
let mut exclusive = vec![];
let mut non_exclusive = vec![];
for namespace in value {
if namespace.exclusive {
exclusive.push(namespace.regex);
} else {
non_exclusive.push(namespace.regex);
}
}
Ok(NamespaceRegex {
exclusive: if exclusive.is_empty() {
None
} else {
Some(RegexSet::new(exclusive)?)
},
non_exclusive: if non_exclusive.is_empty() {
None
} else {
Some(RegexSet::new(non_exclusive)?)
},
})
}
type Error = regex::Error;
}
/// Appservice registration combined with its compiled regular expressions.
#[derive(Clone, Debug)]
pub struct RegistrationInfo {
pub registration: Registration,
pub users: NamespaceRegex,
pub aliases: NamespaceRegex,
pub rooms: NamespaceRegex,
}
impl TryFrom<Registration> for RegistrationInfo {
fn try_from(value: Registration) -> Result<RegistrationInfo, regex::Error> {
Ok(RegistrationInfo {
users: value.namespaces.users.clone().try_into()?,
aliases: value.namespaces.aliases.clone().try_into()?,
rooms: value.namespaces.rooms.clone().try_into()?,
registration: value,
})
}
type Error = regex::Error;
}
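For context, the compiled namespaces behave like plain RegexSet lookups. A small self-contained sketch (the regexes and IDs are invented for illustration):

use regex::RegexSet;

fn main() {
    // Exclusive namespace: IDs only this appservice may control.
    let exclusive = RegexSet::new([r"@_irc_.*:example\.org"]).unwrap();
    // Non-exclusive namespace: IDs the appservice may act for, shared with others.
    let non_exclusive = RegexSet::new([r"@bridge_.*:example\.org"]).unwrap();

    // is_exclusive_match only consults the exclusive set; is_match consults both.
    assert!(exclusive.is_match("@_irc_alice:example.org"));
    assert!(!exclusive.is_match("@bridge_bob:example.org"));
    assert!(non_exclusive.is_match("@bridge_bob:example.org"));
}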
pub struct Service {
pub db: &'static dyn Data,
registration_info: RwLock<BTreeMap<String, RegistrationInfo>>,
}
impl Service {
/// Registers an appservice and returns the ID to the caller
pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result<String> {
pub fn build(db: &'static dyn Data) -> Result<Self> {
let mut registration_info = BTreeMap::new();
// Inserting registrations into cache
for appservice in db.all()? {
registration_info.insert(
appservice.0,
appservice
.1
.try_into()
.expect("Should be validated on registration"),
);
}
Ok(Self {
db,
registration_info: RwLock::new(registration_info),
})
}
/// Registers an appservice and returns the ID to the caller.
pub async fn register_appservice(&self, yaml: Registration) -> Result<String> {
services()
.appservice
.registration_info
.write()
.await
.insert(yaml.id.clone(), yaml.clone().try_into()?);
self.db.register_appservice(yaml)
}
/// Remove an appservice registration
/// Removes an appservice registration.
///
/// # Arguments
///
/// * `service_name` - the name you send to register the service previously
pub fn unregister_appservice(&self, service_name: &str) -> Result<()> {
pub async fn unregister_appservice(&self, service_name: &str) -> Result<()> {
services()
.appservice
.registration_info
.write()
.await
.remove(service_name)
.ok_or_else(|| crate::Error::AdminCommand("Appservice not found"))?;
self.db.unregister_appservice(service_name)
}
pub fn get_registration(&self, id: &str) -> Result<Option<serde_yaml::Value>> {
self.db.get_registration(id)
pub async fn get_registration(&self, id: &str) -> Option<Registration> {
self.registration_info
.read()
.await
.get(id)
.cloned()
.map(|info| info.registration)
}
pub fn iter_ids(&self) -> Result<impl Iterator<Item = Result<String>> + '_> {
self.db.iter_ids()
pub async fn iter_ids(&self) -> Vec<String> {
self.registration_info
.read()
.await
.keys()
.cloned()
.collect()
}
pub fn all(&self) -> Result<Vec<(String, serde_yaml::Value)>> {
self.db.all()
pub async fn find_from_token(&self, token: &str) -> Option<RegistrationInfo> {
self.read()
.await
.values()
.find(|info| info.registration.as_token == token)
.cloned()
}
pub fn read(
&self,
) -> impl Future<Output = tokio::sync::RwLockReadGuard<'_, BTreeMap<String, RegistrationInfo>>>
{
self.registration_info.read()
}
}
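The service now answers reads from this in-memory cache instead of hitting the database. A reduced sketch of the same pattern (types and the token value are illustrative), mirroring find_from_token:

use std::collections::BTreeMap;
use tokio::sync::RwLock;

#[derive(Clone)]
struct Info {
    as_token: String,
}

#[tokio::main]
async fn main() {
    let cache: RwLock<BTreeMap<String, Info>> = RwLock::new(BTreeMap::new());
    cache.write().await.insert(
        "bridge".to_owned(),
        Info { as_token: "secret".to_owned() },
    );

    // Like find_from_token: scan under a read guard, clone the hit out.
    let found = cache
        .read()
        .await
        .values()
        .find(|info| info.as_token == "secret")
        .cloned();
    assert!(found.is_some());
}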

View file

@ -8,6 +8,12 @@ use ruma::{
use crate::api::server_server::FedDest;
use crate::{services, Config, Error, Result};
use futures_util::FutureExt;
use hyper::{
client::connect::dns::{GaiResolver, Name},
service::Service as HyperService,
};
use reqwest::dns::{Addrs, Resolve, Resolving};
use ruma::{
api::{
client::sync::sync_events,
@ -17,17 +23,19 @@ use ruma::{
};
use std::{
collections::{BTreeMap, HashMap},
error::Error as StdError,
fs,
future::Future,
future::{self, Future},
iter,
net::{IpAddr, SocketAddr},
path::PathBuf,
sync::{
atomic::{self, AtomicBool},
Arc, Mutex, RwLock,
Arc, RwLock as StdRwLock,
},
time::{Duration, Instant},
};
use tokio::sync::{broadcast, watch::Receiver, Mutex as TokioMutex, Semaphore};
use tokio::sync::{broadcast, watch::Receiver, Mutex, RwLock, Semaphore};
use tracing::{error, info};
use trust_dns_resolver::TokioAsyncResolver;
@ -45,7 +53,7 @@ pub struct Service {
pub db: &'static dyn Data,
pub actual_destination_cache: Arc<RwLock<WellKnownMap>>, // actual_destination, host
pub tls_name_override: Arc<RwLock<TlsNameMap>>,
pub tls_name_override: Arc<StdRwLock<TlsNameMap>>,
pub config: Config,
keypair: Arc<ruma::signatures::Ed25519KeyPair>,
dns_resolver: TokioAsyncResolver,
@ -60,8 +68,8 @@ pub struct Service {
pub servername_ratelimiter: Arc<RwLock<HashMap<OwnedServerName, Arc<Semaphore>>>>,
pub sync_receivers: RwLock<HashMap<(OwnedUserId, OwnedDeviceId), SyncHandle>>,
pub roomid_mutex_insert: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>,
pub roomid_mutex_state: RwLock<HashMap<OwnedRoomId, Arc<TokioMutex<()>>>>,
pub roomid_mutex_federation: RwLock<HashMap<OwnedRoomId, Arc<TokioMutex<()>>>>, // this lock will be held longer
pub roomid_mutex_state: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>,
pub roomid_mutex_federation: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>, // this lock will be held longer
pub roomid_federationhandletime: RwLock<HashMap<OwnedRoomId, (OwnedEventId, Instant)>>,
pub stateres_mutex: Arc<Mutex<()>>,
pub rotate: RotationHandler,
@ -99,6 +107,45 @@ impl Default for RotationHandler {
}
}
pub struct Resolver {
inner: GaiResolver,
overrides: Arc<StdRwLock<TlsNameMap>>,
}
impl Resolver {
pub fn new(overrides: Arc<StdRwLock<TlsNameMap>>) -> Self {
Resolver {
inner: GaiResolver::new(),
overrides,
}
}
}
impl Resolve for Resolver {
fn resolve(&self, name: Name) -> Resolving {
self.overrides
.read()
.unwrap()
.get(name.as_str())
.and_then(|(override_name, port)| {
override_name.first().map(|first_name| {
let x: Box<dyn Iterator<Item = SocketAddr> + Send> =
Box::new(iter::once(SocketAddr::new(*first_name, *port)));
let x: Resolving = Box::pin(future::ready(Ok(x)));
x
})
})
.unwrap_or_else(|| {
let this = &mut self.inner.clone();
Box::pin(HyperService::<Name>::call(this, name).map(|result| {
result
.map(|addrs| -> Addrs { Box::new(addrs) })
.map_err(|err| -> Box<dyn StdError + Send + Sync> { Box::new(err) })
}))
})
}
}
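The exact definition of TlsNameMap is outside this excerpt; the resolve() body implies it maps a hostname to candidate IPs plus a port. A standalone sketch under that assumption, showing what an override entry looks like before the resolver consults it:

use std::collections::HashMap;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::{Arc, RwLock};

// Assumed shape, inferred from resolve() above: hostname -> (IPs, port).
type TlsNameMap = HashMap<String, (Vec<IpAddr>, u16)>;

fn main() {
    let overrides: Arc<RwLock<TlsNameMap>> = Arc::new(RwLock::new(HashMap::new()));
    overrides.write().unwrap().insert(
        "matrix.example.org".to_owned(),
        (vec![IpAddr::V4(Ipv4Addr::new(203, 0, 113, 7))], 8448),
    );

    // The resolver tries the override first and falls back to GaiResolver.
    let guard = overrides.read().unwrap();
    if let Some((ips, port)) = guard.get("matrix.example.org") {
        let addr = SocketAddr::new(ips[0], *port);
        assert_eq!(addr, "203.0.113.7:8448".parse().unwrap());
    }
}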
impl Service {
pub fn load(db: &'static dyn Data, config: Config) -> Result<Self> {
let keypair = db.load_keypair();
@ -112,7 +159,7 @@ impl Service {
}
};
let tls_name_override = Arc::new(RwLock::new(TlsNameMap::new()));
let tls_name_override = Arc::new(StdRwLock::new(TlsNameMap::new()));
let jwt_decoding_key = config
.jwt_secret
@ -120,14 +167,8 @@ impl Service {
.map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes()));
let default_client = reqwest_client_builder(&config)?.build()?;
let name_override = Arc::clone(&tls_name_override);
let federation_client = reqwest_client_builder(&config)?
.resolve_fn(move |domain| {
let read_guard = name_override.read().unwrap();
let (override_name, port) = read_guard.get(&domain)?;
let first_name = override_name.get(0)?;
Some(SocketAddr::new(*first_name, *port))
})
.dns_resolver(Arc::new(Resolver::new(tls_name_override.clone())))
.build()?;
// Supported and stable room versions

View file

@ -1,11 +1,13 @@
use std::{
collections::{BTreeMap, HashMap},
sync::{Arc, Mutex},
sync::{Arc, Mutex as StdMutex},
};
use lru_cache::LruCache;
use tokio::sync::{broadcast, Mutex};
use crate::{Config, Result};
use tokio::sync::RwLock;
pub mod account_data;
pub mod admin;
@ -55,7 +57,7 @@ impl Services {
config: Config,
) -> Result<Self> {
Ok(Self {
appservice: appservice::Service { db },
appservice: appservice::Service::build(db)?,
pusher: pusher::Service { db },
rooms: rooms::Service {
alias: rooms::alias::Service { db },
@ -64,7 +66,11 @@ impl Services {
edus: rooms::edus::Service {
presence: rooms::edus::presence::Service { db },
read_receipt: rooms::edus::read_receipt::Service { db },
typing: rooms::edus::typing::Service { db },
typing: rooms::edus::typing::Service {
typing: RwLock::new(BTreeMap::new()),
last_typing_update: RwLock::new(BTreeMap::new()),
typing_update_sender: broadcast::channel(100).0,
},
},
event_handler: rooms::event_handler::Service,
lazy_loading: rooms::lazy_loading::Service {
@ -79,17 +85,17 @@ impl Services {
state: rooms::state::Service { db },
state_accessor: rooms::state_accessor::Service {
db,
server_visibility_cache: Mutex::new(LruCache::new(
server_visibility_cache: StdMutex::new(LruCache::new(
(100.0 * config.conduit_cache_capacity_modifier) as usize,
)),
user_visibility_cache: Mutex::new(LruCache::new(
user_visibility_cache: StdMutex::new(LruCache::new(
(100.0 * config.conduit_cache_capacity_modifier) as usize,
)),
},
state_cache: rooms::state_cache::Service { db },
state_compressor: rooms::state_compressor::Service {
db,
stateinfo_cache: Mutex::new(LruCache::new(
stateinfo_cache: StdMutex::new(LruCache::new(
(100.0 * config.conduit_cache_capacity_modifier) as usize,
)),
},
@ -107,7 +113,7 @@ impl Services {
uiaa: uiaa::Service { db },
users: users::Service {
db,
connections: Mutex::new(BTreeMap::new()),
connections: StdMutex::new(BTreeMap::new()),
},
account_data: account_data::Service { db },
admin: admin::Service::build(),
@ -118,14 +124,8 @@ impl Services {
globals: globals::Service::load(db, config)?,
})
}
fn memory_usage(&self) -> String {
let lazy_load_waiting = self
.rooms
.lazy_loading
.lazy_load_waiting
.lock()
.unwrap()
.len();
async fn memory_usage(&self) -> String {
let lazy_load_waiting = self.rooms.lazy_loading.lazy_load_waiting.lock().await.len();
let server_visibility_cache = self
.rooms
.state_accessor
@ -152,15 +152,9 @@ impl Services {
.timeline
.lasttimelinecount_cache
.lock()
.unwrap()
.len();
let roomid_spacechunk_cache = self
.rooms
.spaces
.roomid_spacechunk_cache
.lock()
.unwrap()
.await
.len();
let roomid_spacechunk_cache = self.rooms.spaces.roomid_spacechunk_cache.lock().await.len();
format!(
"\
@ -173,13 +167,13 @@ roomid_spacechunk_cache: {roomid_spacechunk_cache}\
"
)
}
fn clear_caches(&self, amount: u32) {
async fn clear_caches(&self, amount: u32) {
if amount > 0 {
self.rooms
.lazy_loading
.lazy_load_waiting
.lock()
.unwrap()
.await
.clear();
}
if amount > 1 {
@ -211,7 +205,7 @@ roomid_spacechunk_cache: {roomid_spacechunk_cache}\
.timeline
.lasttimelinecount_cache
.lock()
.unwrap()
.await
.clear();
}
if amount > 5 {
@ -219,7 +213,7 @@ roomid_spacechunk_cache: {roomid_spacechunk_cache}\
.spaces
.roomid_spacechunk_cache
.lock()
.unwrap()
.await
.clear();
}
}

View file

@ -365,7 +365,7 @@ impl PartialEq for PduEvent {
}
impl PartialOrd for PduEvent {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.event_id.partial_cmp(&other.event_id)
Some(self.cmp(other))
}
}
impl Ord for PduEvent {
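This is the canonical pattern clippy recommends: once a type is Ord, PartialOrd should delegate to cmp so the two orderings can never disagree. As a standalone sketch:

use std::cmp::Ordering;

#[derive(PartialEq, Eq)]
struct Event {
    id: String,
}

impl Ord for Event {
    fn cmp(&self, other: &Self) -> Ordering {
        self.id.cmp(&other.id)
    }
}

impl PartialOrd for Event {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        // Delegate so PartialOrd and Ord always agree.
        Some(self.cmp(other))
    }
}

fn main() {
    let a = Event { id: "$a".to_owned() };
    let b = Event { id: "$b".to_owned() };
    assert!(a < b);
}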

View file

@ -1,6 +1,6 @@
mod data;
pub use data::Data;
use ruma::events::AnySyncTimelineEvent;
use ruma::{events::AnySyncTimelineEvent, push::PushConditionPowerLevelsCtx};
use crate::{services, Error, PduEvent, Result};
use bytes::BytesMut;
@ -66,8 +66,7 @@ impl Service {
})?
.map(|body| body.freeze());
let reqwest_request = reqwest::Request::try_from(http_request)
.expect("all http requests are valid reqwest requests");
let reqwest_request = reqwest::Request::try_from(http_request)?;
// TODO: we could keep this very short and let exponential backoff do its thing...
//*reqwest_request.timeout_mut() = Some(Duration::from_secs(5));
@ -193,6 +192,12 @@ impl Service {
pdu: &Raw<AnySyncTimelineEvent>,
room_id: &RoomId,
) -> Result<&'a [Action]> {
let power_levels = PushConditionPowerLevelsCtx {
users: power_levels.users.clone(),
users_default: power_levels.users_default,
notifications: power_levels.notifications.clone(),
};
let ctx = PushConditionRoomCtx {
room_id: room_id.to_owned(),
member_count: 10_u32.into(), // TODO: get member count efficiently
@ -201,9 +206,7 @@ impl Service {
.users
.displayname(user)?
.unwrap_or_else(|| user.localpart().to_owned()),
users_power_levels: power_levels.users.clone(),
default_power_level: power_levels.users_default,
notification_power_levels: power_levels.notifications.clone(),
power_levels: Some(power_levels),
};
Ok(ruleset.get_actions(pdu, &ctx))

View file

@ -2,7 +2,7 @@ pub mod presence;
pub mod read_receipt;
pub mod typing;
pub trait Data: presence::Data + read_receipt::Data + typing::Data + 'static {}
pub trait Data: presence::Data + read_receipt::Data + 'static {}
pub struct Service {
pub presence: presence::Service,

View file

@ -17,29 +17,32 @@ impl Service {
/// make sure users outside these rooms can't see them.
pub fn update_presence(
&self,
user_id: &UserId,
room_id: &RoomId,
presence: PresenceEvent,
_user_id: &UserId,
_room_id: &RoomId,
_presence: PresenceEvent,
) -> Result<()> {
self.db.update_presence(user_id, room_id, presence)
// self.db.update_presence(user_id, room_id, presence)
Ok(())
}
/// Resets the presence timeout, so the user will stay in their current presence state.
pub fn ping_presence(&self, user_id: &UserId) -> Result<()> {
self.db.ping_presence(user_id)
pub fn ping_presence(&self, _user_id: &UserId) -> Result<()> {
// self.db.ping_presence(user_id)
Ok(())
}
pub fn get_last_presence_event(
&self,
user_id: &UserId,
room_id: &RoomId,
_user_id: &UserId,
_room_id: &RoomId,
) -> Result<Option<PresenceEvent>> {
let last_update = match self.db.last_presence_update(user_id)? {
Some(last) => last,
None => return Ok(None),
};
// let last_update = match self.db.last_presence_update(user_id)? {
// Some(last) => last,
// None => return Ok(None),
// };
self.db.get_presence_event(room_id, user_id, last_update)
// self.db.get_presence_event(room_id, user_id, last_update)
Ok(None)
}
/* TODO
@ -111,12 +114,12 @@ impl Service {
}*/
/// Returns the most recent presence updates that happened after the event with id `since`.
#[tracing::instrument(skip(self, since, room_id))]
pub fn presence_since(
&self,
room_id: &RoomId,
since: u64,
_room_id: &RoomId,
_since: u64,
) -> Result<HashMap<OwnedUserId, PresenceEvent>> {
self.db.presence_since(room_id, since)
// self.db.presence_since(room_id, since)
Ok(HashMap::new())
}
}

View file

@ -11,6 +11,7 @@ pub trait Data: Send + Sync {
) -> Result<()>;
/// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`.
#[allow(clippy::type_complexity)]
fn readreceipts_since<'a>(
&'a self,
room_id: &RoomId,

View file

@ -1,21 +0,0 @@
use crate::Result;
use ruma::{OwnedUserId, RoomId, UserId};
use std::collections::HashSet;
pub trait Data: Send + Sync {
/// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is
/// called.
fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()>;
/// Removes a user from typing before the timeout is reached.
fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>;
/// Makes sure that typing events with old timestamps get removed.
fn typings_maintain(&self, room_id: &RoomId) -> Result<()>;
/// Returns the count of the last typing update in this room.
fn last_typing_update(&self, room_id: &RoomId) -> Result<u64>;
/// Returns all user ids currently typing.
fn typings_all(&self, room_id: &RoomId) -> Result<HashSet<OwnedUserId>>;
}

View file

@ -1,48 +1,117 @@
mod data;
use ruma::{events::SyncEphemeralRoomEvent, OwnedRoomId, OwnedUserId, RoomId, UserId};
use std::collections::BTreeMap;
use tokio::sync::{broadcast, RwLock};
pub use data::Data;
use ruma::{events::SyncEphemeralRoomEvent, RoomId, UserId};
use crate::Result;
use crate::{services, utils, Result};
pub struct Service {
pub db: &'static dyn Data,
pub typing: RwLock<BTreeMap<OwnedRoomId, BTreeMap<OwnedUserId, u64>>>, // u64 is unix timestamp of timeout
pub last_typing_update: RwLock<BTreeMap<OwnedRoomId, u64>>, // timestamp of the last change to typing users
pub typing_update_sender: broadcast::Sender<OwnedRoomId>,
}
impl Service {
/// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is
/// called.
pub fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> {
self.db.typing_add(user_id, room_id, timeout)
pub async fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> {
self.typing
.write()
.await
.entry(room_id.to_owned())
.or_default()
.insert(user_id.to_owned(), timeout);
self.last_typing_update
.write()
.await
.insert(room_id.to_owned(), services().globals.next_count()?);
let _ = self.typing_update_sender.send(room_id.to_owned());
Ok(())
}
/// Removes a user from typing before the timeout is reached.
pub fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
self.db.typing_remove(user_id, room_id)
pub async fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
self.typing
.write()
.await
.entry(room_id.to_owned())
.or_default()
.remove(user_id);
self.last_typing_update
.write()
.await
.insert(room_id.to_owned(), services().globals.next_count()?);
let _ = self.typing_update_sender.send(room_id.to_owned());
Ok(())
}
pub async fn wait_for_update(&self, room_id: &RoomId) -> Result<()> {
let mut receiver = self.typing_update_sender.subscribe();
while let Ok(next) = receiver.recv().await {
if next == room_id {
break;
}
}
Ok(())
}
/// Makes sure that typing events with old timestamps get removed.
fn typings_maintain(&self, room_id: &RoomId) -> Result<()> {
self.db.typings_maintain(room_id)
async fn typings_maintain(&self, room_id: &RoomId) -> Result<()> {
let current_timestamp = utils::millis_since_unix_epoch();
let mut removable = Vec::new();
{
let typing = self.typing.read().await;
let Some(room) = typing.get(room_id) else {
return Ok(());
};
for (user, timeout) in room {
if *timeout < current_timestamp {
removable.push(user.clone());
}
}
drop(typing);
}
if !removable.is_empty() {
let typing = &mut self.typing.write().await;
let room = typing.entry(room_id.to_owned()).or_default();
for user in removable {
room.remove(&user);
}
self.last_typing_update
.write()
.await
.insert(room_id.to_owned(), services().globals.next_count()?);
let _ = self.typing_update_sender.send(room_id.to_owned());
}
Ok(())
}
/// Returns the count of the last typing update in this room.
pub fn last_typing_update(&self, room_id: &RoomId) -> Result<u64> {
self.typings_maintain(room_id)?;
self.db.last_typing_update(room_id)
pub async fn last_typing_update(&self, room_id: &RoomId) -> Result<u64> {
self.typings_maintain(room_id).await?;
Ok(self
.last_typing_update
.read()
.await
.get(room_id)
.copied()
.unwrap_or(0))
}
/// Returns a new typing EDU.
pub fn typings_all(
pub async fn typings_all(
&self,
room_id: &RoomId,
) -> Result<SyncEphemeralRoomEvent<ruma::events::typing::TypingEventContent>> {
let user_ids = self.db.typings_all(room_id)?;
Ok(SyncEphemeralRoomEvent {
content: ruma::events::typing::TypingEventContent {
user_ids: user_ids.into_iter().collect(),
user_ids: self
.typing
.read()
.await
.get(room_id)
.map(|m| m.keys().cloned().collect())
.unwrap_or_default(),
},
})
}
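A reduced sketch of the notify/wait mechanism introduced here (room IDs are plain strings for brevity): writers update the maps and send the room ID on a broadcast channel; wait_for_update subscribes and filters for its room:

use tokio::sync::broadcast;

#[tokio::main]
async fn main() {
    let (typing_update_sender, _) = broadcast::channel::<String>(100);

    let mut receiver = typing_update_sender.subscribe();
    let waiter = tokio::spawn(async move {
        // Like wait_for_update: ignore updates for other rooms.
        while let Ok(room_id) = receiver.recv().await {
            if room_id == "!a:example.org" {
                break;
            }
        }
    });

    let _ = typing_update_sender.send("!b:example.org".to_owned()); // ignored
    let _ = typing_update_sender.send("!a:example.org".to_owned()); // wakes the waiter
    waiter.await.unwrap();
}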

View file

@ -1,25 +1,23 @@
/// An async function that can recursively call itself.
type AsyncRecursiveType<'a, T> = Pin<Box<dyn Future<Output = T> + 'a + Send>>;
use ruma::{
api::federation::discovery::{get_remote_server_keys, get_server_keys},
CanonicalJsonObject, CanonicalJsonValue, OwnedServerName, OwnedServerSigningKeyId,
RoomVersionId,
};
use std::{
collections::{hash_map, BTreeMap, HashMap, HashSet},
pin::Pin,
sync::{Arc, RwLock, RwLockWriteGuard},
sync::Arc,
time::{Duration, Instant, SystemTime},
};
use tokio::sync::Semaphore;
use futures_util::{stream::FuturesUnordered, Future, StreamExt};
use ruma::{
api::{
client::error::ErrorKind,
federation::{
discovery::get_remote_server_keys_batch::{self, v2::QueryCriteria},
discovery::{
get_remote_server_keys,
get_remote_server_keys_batch::{self, v2::QueryCriteria},
get_server_keys,
},
event::{get_event, get_room_state_ids},
membership::create_join_event,
},
@ -31,9 +29,11 @@ use ruma::{
int,
serde::Base64,
state_res::{self, RoomVersion, StateMap},
uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName,
uint, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch,
OwnedServerName, OwnedServerSigningKeyId, RoomId, RoomVersionId, ServerName,
};
use serde_json::value::RawValue as RawJsonValue;
use tokio::sync::{RwLock, RwLockWriteGuard, Semaphore};
use tracing::{debug, error, info, trace, warn};
use crate::{service::*, services, Error, PduEvent, Result};
@ -92,7 +92,7 @@ impl Service {
));
}
services().rooms.event_handler.acl_check(origin, &room_id)?;
services().rooms.event_handler.acl_check(origin, room_id)?;
// 1. Skip the PDU if we already have it as a timeline event
if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(event_id)? {
@ -168,7 +168,7 @@ impl Service {
.globals
.bad_event_ratelimiter
.read()
.unwrap()
.await
.get(&*prev_id)
{
// Exponential backoff
@ -184,7 +184,22 @@ impl Service {
}
if errors >= 5 {
break;
// Timeout other events
match services()
.globals
.bad_event_ratelimiter
.write()
.await
.entry((*prev_id).to_owned())
{
hash_map::Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
}
hash_map::Entry::Occupied(mut e) => {
*e.get_mut() = (Instant::now(), e.get().1 + 1)
}
}
continue;
}
if let Some((pdu, json)) = eventid_info.remove(&*prev_id) {
@ -198,7 +213,7 @@ impl Service {
.globals
.roomid_federationhandletime
.write()
.unwrap()
.await
.insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time));
if let Err(e) = self
@ -218,7 +233,7 @@ impl Service {
.globals
.bad_event_ratelimiter
.write()
.unwrap()
.await
.entry((*prev_id).to_owned())
{
hash_map::Entry::Vacant(e) => {
@ -234,7 +249,7 @@ impl Service {
.globals
.roomid_federationhandletime
.write()
.unwrap()
.await
.remove(&room_id.to_owned());
debug!(
"Handling prev event {} took {}m{}s",
@ -252,7 +267,7 @@ impl Service {
.globals
.roomid_federationhandletime
.write()
.unwrap()
.await
.insert(room_id.to_owned(), (event_id.to_owned(), start_time));
let r = services()
.rooms
@ -270,12 +285,13 @@ impl Service {
.globals
.roomid_federationhandletime
.write()
.unwrap()
.await
.remove(&room_id.to_owned());
r
}
#[allow(clippy::type_complexity, clippy::too_many_arguments)]
#[tracing::instrument(skip(self, create_event, value, pub_key_map))]
fn handle_outlier_pdu<'a>(
&'a self,
@ -310,11 +326,8 @@ impl Service {
let room_version =
RoomVersion::new(room_version_id).expect("room version is supported");
let mut val = match ruma::signatures::verify_event(
&pub_key_map.read().expect("RwLock is poisoned."),
&value,
room_version_id,
) {
let guard = pub_key_map.read().await;
let mut val = match ruma::signatures::verify_event(&guard, &value, room_version_id) {
Err(e) => {
// Drop
warn!("Dropping bad event {}: {}", event_id, e,);
@ -349,6 +362,8 @@ impl Service {
Ok(ruma::signatures::Verified::All) => value,
};
drop(guard);
// Now that we have checked the signature and hashes we can add the eventID and convert
// to our PduEvent type
val.insert(
@ -676,13 +691,15 @@ impl Service {
{
Ok(res) => {
debug!("Fetching state events at event.");
let collect = res
.pdu_ids
.iter()
.map(|x| Arc::from(&**x))
.collect::<Vec<_>>();
let state_vec = self
.fetch_and_handle_outliers(
origin,
&res.pdu_ids
.iter()
.map(|x| Arc::from(&**x))
.collect::<Vec<_>>(),
&collect,
create_event,
room_id,
room_version_id,
@ -789,7 +806,7 @@ impl Service {
.globals
.roomid_mutex_state
.write()
.unwrap()
.await
.entry(room_id.to_owned())
.or_default(),
);
@ -868,14 +885,18 @@ impl Service {
debug!("Starting soft fail auth check");
if soft_fail {
services().rooms.timeline.append_incoming_pdu(
&incoming_pdu,
val,
extremities.iter().map(|e| (**e).to_owned()).collect(),
state_ids_compressed,
soft_fail,
&state_lock,
)?;
services()
.rooms
.timeline
.append_incoming_pdu(
&incoming_pdu,
val,
extremities.iter().map(|e| (**e).to_owned()).collect(),
state_ids_compressed,
soft_fail,
&state_lock,
)
.await?;
// Soft fail, we keep the event as an outlier but don't add it to the timeline
warn!("Event was soft failed: {:?}", incoming_pdu);
@ -896,14 +917,18 @@ impl Service {
// We use the `state_at_event` instead of `state_after` so we accurately
// represent the state for this event.
let pdu_id = services().rooms.timeline.append_incoming_pdu(
&incoming_pdu,
val,
extremities.iter().map(|e| (**e).to_owned()).collect(),
state_ids_compressed,
soft_fail,
&state_lock,
)?;
let pdu_id = services()
.rooms
.timeline
.append_incoming_pdu(
&incoming_pdu,
val,
extremities.iter().map(|e| (**e).to_owned()).collect(),
state_ids_compressed,
soft_fail,
&state_lock,
)
.await?;
debug!("Appended incoming pdu");
@ -965,14 +990,21 @@ impl Service {
debug!("Resolving state");
let lock = services().globals.stateres_mutex.lock();
let state = match state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| {
let fetch_event = |id: &_| {
let res = services().rooms.timeline.get_pdu(id);
if let Err(e) = &res {
error!("LOOK AT ME Failed to fetch event: {}", e);
}
res.ok().flatten()
}) {
};
let lock = services().globals.stateres_mutex.lock();
let state = match state_res::resolve(
room_version_id,
&fork_states,
auth_chain_sets,
fetch_event,
) {
Ok(new_state) => new_state,
Err(_) => {
return Err(Error::bad_database("State resolution failed, either an event could not be found or deserialization"));
@ -1009,6 +1041,7 @@ impl Service {
/// b. Look at outlier pdu tree
/// c. Ask origin server over federation
/// d. TODO: Ask other servers over federation?
#[allow(clippy::type_complexity)]
#[tracing::instrument(skip_all)]
pub(crate) fn fetch_and_handle_outliers<'a>(
&'a self,
@ -1021,17 +1054,21 @@ impl Service {
) -> AsyncRecursiveType<'a, Vec<(Arc<PduEvent>, Option<BTreeMap<String, CanonicalJsonValue>>)>>
{
Box::pin(async move {
let back_off = |id| match services()
.globals
.bad_event_ratelimiter
.write()
.unwrap()
.entry(id)
{
hash_map::Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
let back_off = |id| async move {
match services()
.globals
.bad_event_ratelimiter
.write()
.await
.entry(id)
{
hash_map::Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
}
hash_map::Entry::Occupied(mut e) => {
*e.get_mut() = (Instant::now(), e.get().1 + 1)
}
}
hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
};
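The ratelimiter maps an event ID to (last failure time, failure count); the "Exponential backoff" checks elsewhere in this file gate retries on both. A standalone sketch of that gating (the constants here are illustrative, not taken from the source):

use std::collections::HashMap;
use std::time::{Duration, Instant};

fn main() {
    let mut ratelimiter: HashMap<String, (Instant, u32)> = HashMap::new();

    // Record a failure, like the hash_map::Entry match in the diff.
    let entry = ratelimiter
        .entry("$event:example.org".to_owned())
        .or_insert((Instant::now(), 0));
    *entry = (Instant::now(), entry.1 + 1);

    // Gate a retry: require a quadratically growing wait, with a cap.
    if let Some((when, tries)) = ratelimiter.get("$event:example.org") {
        let min_wait = Duration::from_secs(5 * 60)
            .saturating_mul(tries * tries)
            .min(Duration::from_secs(60 * 60 * 24));
        let retry_allowed = when.elapsed() >= min_wait;
        println!("tries={tries}, retry allowed: {retry_allowed}");
    }
}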
let mut pdus = vec![];
@ -1057,7 +1094,7 @@ impl Service {
.globals
.bad_event_ratelimiter
.read()
.unwrap()
.await
.get(&*next_id)
{
// Exponential backoff
@ -1104,7 +1141,7 @@ impl Service {
match pdu::gen_event_id_canonical_json(&res.pdu, room_version_id) {
Ok(t) => t,
Err(_) => {
back_off((*next_id).to_owned());
back_off((*next_id).to_owned()).await;
continue;
}
};
@ -1136,7 +1173,7 @@ impl Service {
}
Err(_) => {
warn!("Failed to fetch event: {}", next_id);
back_off((*next_id).to_owned());
back_off((*next_id).to_owned()).await;
}
}
}
@ -1146,7 +1183,7 @@ impl Service {
.globals
.bad_event_ratelimiter
.read()
.unwrap()
.await
.get(&**next_id)
{
// Exponential backoff
@ -1181,7 +1218,7 @@ impl Service {
}
Err(e) => {
warn!("Authentication of event {} failed: {:?}", next_id, e);
back_off((**next_id).to_owned());
back_off((**next_id).to_owned()).await;
}
}
}
@ -1336,7 +1373,7 @@ impl Service {
pub_key_map
.write()
.map_err(|_| Error::bad_database("RwLock is poisoned."))?
.await
.insert(signature_server.clone(), keys);
}
@ -1345,7 +1382,7 @@ impl Service {
// Gets a list of servers for which we don't have the signing key yet. We go over
// the PDUs and either cache the key or add it to the list that needs to be retrieved.
fn get_server_keys_from_cache(
async fn get_server_keys_from_cache(
&self,
pdu: &RawJsonValue,
servers: &mut BTreeMap<OwnedServerName, BTreeMap<OwnedServerSigningKeyId, QueryCriteria>>,
@ -1369,7 +1406,7 @@ impl Service {
.globals
.bad_event_ratelimiter
.read()
.unwrap()
.await
.get(event_id)
{
// Exponential backoff
@ -1445,17 +1482,19 @@ impl Service {
> = BTreeMap::new();
{
let mut pkm = pub_key_map
.write()
.map_err(|_| Error::bad_database("RwLock is poisoned."))?;
let mut pkm = pub_key_map.write().await;
// Try to fetch keys, failure is okay
// Servers we couldn't find in the cache will be added to `servers`
for pdu in &event.room_state.state {
let _ = self.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm);
let _ = self
.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm)
.await;
}
for pdu in &event.room_state.auth_chain {
let _ = self.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm);
let _ = self
.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm)
.await;
}
drop(pkm);
@ -1479,9 +1518,7 @@ impl Service {
.await
{
trace!("Got signing keys: {:?}", keys);
let mut pkm = pub_key_map
.write()
.map_err(|_| Error::bad_database("RwLock is poisoned."))?;
let mut pkm = pub_key_map.write().await;
for k in keys.server_keys {
let k = match k.deserialize() {
Ok(key) => key,
@ -1540,10 +1577,7 @@ impl Service {
.into_iter()
.map(|(k, v)| (k.to_string(), v.key))
.collect();
pub_key_map
.write()
.map_err(|_| Error::bad_database("RwLock is poisoned."))?
.insert(origin.to_string(), result);
pub_key_map.write().await.insert(origin.to_string(), result);
}
}
info!("Done handling result");
@ -1608,14 +1642,14 @@ impl Service {
.globals
.servername_ratelimiter
.read()
.unwrap()
.await
.get(origin)
.map(|s| Arc::clone(s).acquire_owned());
let permit = match permit {
Some(p) => p,
None => {
let mut write = services().globals.servername_ratelimiter.write().unwrap();
let mut write = services().globals.servername_ratelimiter.write().await;
let s = Arc::clone(
write
.entry(origin.to_owned())
@ -1627,24 +1661,26 @@ impl Service {
}
.await;
let back_off = |id| match services()
.globals
.bad_signature_ratelimiter
.write()
.unwrap()
.entry(id)
{
hash_map::Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
let back_off = |id| async {
match services()
.globals
.bad_signature_ratelimiter
.write()
.await
.entry(id)
{
hash_map::Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
}
hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
}
hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
};
if let Some((time, tries)) = services()
.globals
.bad_signature_ratelimiter
.read()
.unwrap()
.await
.get(&signature_ids)
{
// Exponential backoff
@ -1751,7 +1787,7 @@ impl Service {
drop(permit);
back_off(signature_ids);
back_off(signature_ids).await;
warn!("Failed to find public key for server: {}", origin);
Err(Error::BadServerResponse(

View file

@ -1,11 +1,9 @@
mod data;
use std::{
collections::{HashMap, HashSet},
sync::Mutex,
};
use std::collections::{HashMap, HashSet};
pub use data::Data;
use ruma::{DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, RoomId, UserId};
use tokio::sync::Mutex;
use crate::Result;
@ -14,6 +12,7 @@ use super::timeline::PduCount;
pub struct Service {
pub db: &'static dyn Data,
#[allow(clippy::type_complexity)]
pub lazy_load_waiting:
Mutex<HashMap<(OwnedUserId, OwnedDeviceId, OwnedRoomId, PduCount), HashSet<OwnedUserId>>>,
}
@ -32,7 +31,7 @@ impl Service {
}
#[tracing::instrument(skip(self))]
pub fn lazy_load_mark_sent(
pub async fn lazy_load_mark_sent(
&self,
user_id: &UserId,
device_id: &DeviceId,
@ -40,7 +39,7 @@ impl Service {
lazy_load: HashSet<OwnedUserId>,
count: PduCount,
) {
self.lazy_load_waiting.lock().unwrap().insert(
self.lazy_load_waiting.lock().await.insert(
(
user_id.to_owned(),
device_id.to_owned(),
@ -52,14 +51,14 @@ impl Service {
}
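// A minimal sketch (stand-in String/u64 types, not conduit's real IDs) of the
// lazy-load bookkeeping this file now guards with tokio::sync::Mutex: entries
// are staged under a (user, device, room, count) key when sent, then drained
// once delivery is confirmed.
use std::collections::{HashMap, HashSet};
use tokio::sync::Mutex;

type Key = (String, String, String, u64); // (user_id, device_id, room_id, count)

struct LazyLoad {
    waiting: Mutex<HashMap<Key, HashSet<String>>>,
}

impl LazyLoad {
    async fn mark_sent(&self, key: Key, users: HashSet<String>) {
        self.waiting.lock().await.insert(key, users);
    }

    // On confirmed delivery the staged set is removed; the real service then
    // persists it to the database.
    async fn confirm_delivery(&self, key: &Key) -> Option<HashSet<String>> {
        self.waiting.lock().await.remove(key)
    }
}

#[tokio::main]
async fn main() {
    let ll = LazyLoad { waiting: Mutex::new(HashMap::new()) };
    let key = ("@a:x".to_owned(), "DEV".to_owned(), "!r:x".to_owned(), 7);
    ll.mark_sent(key.clone(), HashSet::from(["@b:x".to_owned()])).await;
    assert!(ll.confirm_delivery(&key).await.is_some());
}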
#[tracing::instrument(skip(self))]
pub fn lazy_load_confirm_delivery(
pub async fn lazy_load_confirm_delivery(
&self,
user_id: &UserId,
device_id: &DeviceId,
room_id: &RoomId,
since: PduCount,
) -> Result<()> {
if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&(
if let Some(user_ids) = self.lazy_load_waiting.lock().await.remove(&(
user_id.to_owned(),
device_id.to_owned(),
room_id.to_owned(),
View file
@ -5,6 +5,7 @@ use ruma::{EventId, RoomId, UserId};
pub trait Data: Send + Sync {
fn add_relation(&self, from: u64, to: u64) -> Result<()>;
#[allow(clippy::type_complexity)]
fn relations_until<'a>(
&'a self,
user_id: &'a UserId,
View file
@ -40,6 +40,7 @@ impl Service {
}
}
#[allow(clippy::too_many_arguments)]
pub fn paginate_relations_with_filter(
&self,
sender_user: &UserId,
@ -82,7 +83,7 @@ impl Service {
services()
.rooms
.state_accessor
.user_can_see_event(sender_user, &room_id, &pdu.event_id)
.user_can_see_event(sender_user, room_id, &pdu.event_id)
.unwrap_or(false)
})
.take_while(|&(k, _)| Some(k) != to) // Stop at `to`
@ -106,7 +107,7 @@ impl Service {
let events_before: Vec<_> = services()
.rooms
.pdu_metadata
.relations_until(sender_user, &room_id, target, from)?
.relations_until(sender_user, room_id, target, from)?
.filter(|r| {
r.as_ref().map_or(true, |(_, pdu)| {
filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t)
@ -129,7 +130,7 @@ impl Service {
services()
.rooms
.state_accessor
.user_can_see_event(sender_user, &room_id, &pdu.event_id)
.user_can_see_event(sender_user, room_id, &pdu.event_id)
.unwrap_or(false)
})
.take_while(|&(k, _)| Some(k) != to) // Stop at `to`
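// A sketch of the pagination shape used above, with a hypothetical Pdu type:
// per-event visibility filtering, then `take_while` to stop once the `to`
// pagination token is reached, exactly as in the hunks above.
#[derive(Debug)]
struct Pdu { id: u64, visible: bool }

fn paginate(events: Vec<(u64, Pdu)>, to: Option<u64>, limit: usize) -> Vec<Pdu> {
    events
        .into_iter()
        .filter(|(_, pdu)| pdu.visible) // drop events the sender may not see
        .take_while(|&(k, _)| Some(k) != to) // stop at `to`
        .take(limit)
        .map(|(_, pdu)| pdu)
        .collect()
}

fn main() {
    let events = vec![
        (3, Pdu { id: 3, visible: true }),
        (2, Pdu { id: 2, visible: false }),
        (1, Pdu { id: 1, visible: true }),
    ];
    let page = paginate(events, Some(1), 10); // newest first, stop before token 1
    assert_eq!(page.len(), 1);
    assert_eq!(page[0].id, 3);
}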
View file
@ -4,6 +4,7 @@ use ruma::RoomId;
pub trait Data: Send + Sync {
fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>;
#[allow(clippy::type_complexity)]
fn search_pdus<'a>(
&'a self,
room_id: &RoomId,
View file
@ -1,4 +1,4 @@
use std::sync::{Arc, Mutex};
use std::sync::Arc;
use lru_cache::LruCache;
use ruma::{
@ -25,6 +25,7 @@ use ruma::{
space::SpaceRoomJoinRule,
OwnedRoomId, RoomId, UserId,
};
use tokio::sync::Mutex;
use tracing::{debug, error, warn};
@ -79,7 +80,7 @@ impl Service {
if let Some(cached) = self
.roomid_spacechunk_cache
.lock()
.unwrap()
.await
.get_mut(&current_room.to_owned())
.as_ref()
{
@ -134,7 +135,7 @@ impl Service {
if serde_json::from_str::<SpaceChildEventContent>(pdu.content.get())
.ok()
.and_then(|c| c.via)
.map(|c| c.via)
.map_or(true, |v| v.is_empty())
{
continue;
@ -171,7 +172,7 @@ impl Service {
.transpose()?
.unwrap_or(JoinRule::Invite);
self.roomid_spacechunk_cache.lock().unwrap().insert(
self.roomid_spacechunk_cache.lock().await.insert(
current_room.clone(),
Some(CachedSpaceChunk {
chunk,
@ -185,7 +186,9 @@ impl Service {
stack.push(children_ids);
}
} else {
let server = current_room.server_name();
let server = current_room
.server_name()
.expect("Room IDs should always have a server name");
if server == services().globals.server_name() {
continue;
}
@ -193,11 +196,11 @@ impl Service {
// Early return so the client can see some data already
break;
}
warn!("Asking {server} for /hierarchy");
debug!("Asking {server} for /hierarchy");
if let Ok(response) = services()
.sending
.send_federation_request(
&server,
server,
federation::space::get_hierarchy::v1::Request {
room_id: current_room.to_owned(),
suggested_only,
@ -235,7 +238,7 @@ impl Service {
.room
.allowed_room_ids
.into_iter()
.map(|room| AllowRule::room_membership(room))
.map(AllowRule::room_membership)
.collect(),
})
}
@ -245,7 +248,7 @@ impl Service {
.room
.allowed_room_ids
.into_iter()
.map(|room| AllowRule::room_membership(room))
.map(AllowRule::room_membership)
.collect(),
})
}
@ -263,7 +266,7 @@ impl Service {
}
}
self.roomid_spacechunk_cache.lock().unwrap().insert(
self.roomid_spacechunk_cache.lock().await.insert(
current_room.clone(),
Some(CachedSpaceChunk {
chunk,
@ -287,7 +290,7 @@ impl Service {
} else {
self.roomid_spacechunk_cache
.lock()
.unwrap()
.await
.insert(current_room.clone(), None);
}
}
@ -313,7 +316,7 @@ impl Service {
canonical_alias: services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")?
.room_state_get(room_id, &StateEventType::RoomCanonicalAlias, "")?
.map_or(Ok(None), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomCanonicalAliasEventContent| c.alias)
@ -321,11 +324,11 @@ impl Service {
Error::bad_database("Invalid canonical alias event in database.")
})
})?,
name: services().rooms.state_accessor.get_name(&room_id)?,
name: services().rooms.state_accessor.get_name(room_id)?,
num_joined_members: services()
.rooms
.state_cache
.room_joined_count(&room_id)?
.room_joined_count(room_id)?
.unwrap_or_else(|| {
warn!("Room {} has no member count", room_id);
0
@ -336,7 +339,7 @@ impl Service {
topic: services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomTopic, "")?
.room_state_get(room_id, &StateEventType::RoomTopic, "")?
.map_or(Ok(None), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomTopicEventContent| Some(c.topic))
@ -348,7 +351,7 @@ impl Service {
world_readable: services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")?
.room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")?
.map_or(Ok(false), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomHistoryVisibilityEventContent| {
@ -363,7 +366,7 @@ impl Service {
guest_can_join: services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")?
.room_state_get(room_id, &StateEventType::RoomGuestAccess, "")?
.map_or(Ok(false), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomGuestAccessEventContent| {
@ -376,7 +379,7 @@ impl Service {
avatar_url: services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomAvatar, "")?
.room_state_get(room_id, &StateEventType::RoomAvatar, "")?
.map(|s| {
serde_json::from_str(s.content.get())
.map(|c: RoomAvatarEventContent| c.url)
@ -389,7 +392,7 @@ impl Service {
let join_rule = services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomJoinRules, "")?
.room_state_get(room_id, &StateEventType::RoomJoinRules, "")?
.map(|s| {
serde_json::from_str(s.content.get())
.map(|c: RoomJoinRulesEventContent| c.join_rule)
@ -415,7 +418,7 @@ impl Service {
room_type: services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomCreate, "")?
.room_state_get(room_id, &StateEventType::RoomCreate, "")?
.map(|s| {
serde_json::from_str::<RoomCreateEventContent>(s.content.get()).map_err(|e| {
error!("Invalid room create event in database: {}", e);
@ -455,7 +458,7 @@ impl Service {
SpaceRoomJoinRule::Invite => services()
.rooms
.state_cache
.is_joined(sender_user, &room_id)?,
.is_joined(sender_user, room_id)?,
_ => false,
};
@ -479,17 +482,14 @@ impl Service {
match join_rule {
JoinRule::Restricted(r) => {
for rule in &r.allow {
match rule {
join_rules::AllowRule::RoomMembership(rm) => {
if let Ok(true) = services()
.rooms
.state_cache
.is_joined(sender_user, &rm.room_id)
{
return Ok(true);
}
if let join_rules::AllowRule::RoomMembership(rm) = rule {
if let Ok(true) = services()
.rooms
.state_cache
.is_joined(sender_user, &rm.room_id)
{
return Ok(true);
}
_ => {}
}
}
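// A sketch of the simplification in the last hunk: with only the
// RoomMembership variant of interest, `if let` replaces a match whose other
// arm was `_ => {}`. The types are simplified stand-ins for ruma's join rules.
enum AllowRule {
    RoomMembership { room_id: String },
    Other,
}

fn user_can_join_restricted(rules: &[AllowRule], joined_rooms: &[String]) -> bool {
    for rule in rules {
        if let AllowRule::RoomMembership { room_id } = rule {
            if joined_rooms.iter().any(|r| r == room_id) {
                return true;
            }
        }
    }
    false
}

fn main() {
    let rules = vec![
        AllowRule::Other,
        AllowRule::RoomMembership { room_id: "!space:example.org".to_owned() },
    ];
    assert!(user_can_join_restricted(&rules, &["!space:example.org".to_owned()]));
}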
View file
@ -41,7 +41,7 @@ impl Service {
services()
.rooms
.state_compressor
.parse_compressed_state_event(&new)
.parse_compressed_state_event(new)
.ok()
.map(|(_, id)| id)
}) {
@ -95,7 +95,7 @@ impl Service {
.spaces
.roomid_spacechunk_cache
.lock()
.unwrap()
.await
.remove(&pdu.room_id);
}
_ => continue,
@ -412,7 +412,7 @@ impl Service {
services()
.rooms
.state_compressor
.parse_compressed_state_event(&compressed)
.parse_compressed_state_event(compressed)
.ok()
})
.filter_map(|(shortstatekey, event_id)| {
View file
@ -16,11 +16,12 @@ use ruma::{
},
StateEventType,
},
EventId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
EventId, JsOption, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
};
use tracing::error;
use serde_json::value::to_raw_value;
use tracing::{error, warn};
use crate::{services, Error, PduEvent, Result};
use crate::{service::pdu::PduBuilder, services, Error, PduEvent, Result};
pub struct Service {
pub db: &'static dyn Data,
@ -180,7 +181,7 @@ impl Service {
return Ok(*visibility);
}
let currently_member = services().rooms.state_cache.is_joined(&user_id, &room_id)?;
let currently_member = services().rooms.state_cache.is_joined(user_id, room_id)?;
let history_visibility = self
.state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "")?
@ -197,11 +198,11 @@ impl Service {
HistoryVisibility::Shared => currently_member,
HistoryVisibility::Invited => {
// Allow if any member on requesting server was AT LEAST invited, else deny
self.user_was_invited(shortstatehash, &user_id)
self.user_was_invited(shortstatehash, user_id)
}
HistoryVisibility::Joined => {
// Allow if any member on requested server was joined, else deny
self.user_was_joined(shortstatehash, &user_id)
self.user_was_joined(shortstatehash, user_id)
}
_ => {
error!("Unknown history visibility {history_visibility}");
@ -221,10 +222,10 @@ impl Service {
/// the room's history_visibility at that event's state.
#[tracing::instrument(skip(self, user_id, room_id))]
pub fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> Result<bool> {
let currently_member = services().rooms.state_cache.is_joined(&user_id, &room_id)?;
let currently_member = services().rooms.state_cache.is_joined(user_id, room_id)?;
let history_visibility = self
.room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")?
.room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")?
.map_or(Ok(HistoryVisibility::Shared), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomHistoryVisibilityEventContent| c.history_visibility)
@ -276,25 +277,66 @@ impl Service {
services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomName, "")?
.room_state_get(room_id, &StateEventType::RoomName, "")?
.map_or(Ok(None), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomNameEventContent| c.name)
.map_err(|_| Error::bad_database("Invalid room name event in database."))
.map(|c: RoomNameEventContent| Some(c.name))
.map_err(|e| {
error!(
"Invalid room name event in database for room {}. {}",
room_id, e
);
Error::bad_database("Invalid room name event in database.")
})
})
}
pub fn get_avatar(&self, room_id: &RoomId) -> Result<Option<RoomAvatarEventContent>> {
pub fn get_avatar(&self, room_id: &RoomId) -> Result<JsOption<RoomAvatarEventContent>> {
services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomAvatar, "")?
.map_or(Ok(None), |s| {
.room_state_get(room_id, &StateEventType::RoomAvatar, "")?
.map_or(Ok(JsOption::Undefined), |s| {
serde_json::from_str(s.content.get())
.map_err(|_| Error::bad_database("Invalid room avatar event in database."))
})
}
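// get_avatar now returns ruma's JsOption rather than Option, distinguishing a
// missing m.room.avatar event from one whose url was explicitly cleared. A
// simplified stand-in enum (not ruma's actual type) to illustrate the three
// states:
#[derive(Debug, PartialEq)]
enum JsOption<T> {
    Some(T),
    Null,      // event present, value explicitly null: avatar was removed
    Undefined, // no event at all
}

fn avatar_from_state(state_event: Option<Option<String>>) -> JsOption<String> {
    match state_event {
        None => JsOption::Undefined,
        Some(None) => JsOption::Null,
        Some(Some(url)) => JsOption::Some(url),
    }
}

fn main() {
    assert_eq!(avatar_from_state(None), JsOption::Undefined);
    assert_eq!(avatar_from_state(Some(None)), JsOption::Null);
    assert_eq!(
        avatar_from_state(Some(Some("mxc://x/y".to_owned()))),
        JsOption::Some("mxc://x/y".to_owned())
    );
}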
pub async fn user_can_invite(
&self,
room_id: &RoomId,
sender: &UserId,
target_user: &UserId,
) -> Result<bool> {
let content = to_raw_value(&RoomMemberEventContent::new(MembershipState::Invite))
.expect("Event content always serializes");
let new_event = PduBuilder {
event_type: ruma::events::TimelineEventType::RoomMember,
content,
unsigned: None,
state_key: Some(target_user.into()),
redacts: None,
};
let mutex_state = Arc::clone(
services()
.globals
.roomid_mutex_state
.write()
.await
.entry(room_id.to_owned())
.or_default(),
);
let state_lock = mutex_state.lock().await;
Ok(services()
.rooms
.timeline
.create_hash_and_sign_event(new_event, sender, room_id, &state_lock)
.is_ok())
}
pub fn get_member(
&self,
room_id: &RoomId,
@ -303,7 +345,7 @@ impl Service {
services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomMember, user_id.as_str())?
.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())?
.map_or(Ok(None), |s| {
serde_json::from_str(s.content.get())
.map_err(|_| Error::bad_database("Invalid room member event in database."))
View file
@ -1,6 +1,6 @@
use std::{collections::HashSet, sync::Arc};
use crate::Result;
use crate::{service::appservice::RegistrationInfo, Result};
use ruma::{
events::{AnyStrippedStateEvent, AnySyncStateEvent},
serde::Raw,
@ -22,11 +22,7 @@ pub trait Data: Send + Sync {
fn get_our_real_users(&self, room_id: &RoomId) -> Result<Arc<HashSet<OwnedUserId>>>;
fn appservice_in_room(
&self,
room_id: &RoomId,
appservice: &(String, serde_yaml::Value),
) -> Result<bool>;
fn appservice_in_room(&self, room_id: &RoomId, appservice: &RegistrationInfo) -> Result<bool>;
/// Makes a user forget a room.
fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()>;
@ -78,6 +74,7 @@ pub trait Data: Send + Sync {
) -> Box<dyn Iterator<Item = Result<OwnedRoomId>> + 'a>;
/// Returns an iterator over all rooms a user was invited to.
#[allow(clippy::type_complexity)]
fn rooms_invited<'a>(
&'a self,
user_id: &UserId,
@ -96,6 +93,7 @@ pub trait Data: Send + Sync {
) -> Result<Option<Vec<Raw<AnyStrippedStateEvent>>>>;
/// Returns an iterator over all rooms a user left.
#[allow(clippy::type_complexity)]
fn rooms_left<'a>(
&'a self,
user_id: &UserId,
View file
@ -16,7 +16,7 @@ use ruma::{
};
use tracing::warn;
use crate::{services, Error, Result};
use crate::{service::appservice::RegistrationInfo, services, Error, Result};
pub struct Service {
pub db: &'static dyn Data,
@ -205,7 +205,7 @@ impl Service {
pub fn appservice_in_room(
&self,
room_id: &RoomId,
appservice: &(String, serde_yaml::Value),
appservice: &RegistrationInfo,
) -> Result<bool> {
self.db.appservice_in_room(room_id, appservice)
}
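// appservice_in_room now takes a typed RegistrationInfo instead of an
// (id, serde_yaml::Value) tuple. A hedged sketch of the idea (the struct
// shape is illustrative, not conduit's exact definition): namespace regexes
// are compiled once at registration time rather than re-read from YAML per
// event.
use regex::Regex;

struct RegistrationInfo {
    id: String,
    users: Regex, // compiled from the registration's user namespaces
}

fn appservice_in_room(room_members: &[String], info: &RegistrationInfo) -> bool {
    room_members.iter().any(|m| info.users.is_match(m))
}

fn main() {
    let info = RegistrationInfo {
        id: "irc-bridge".to_owned(),
        users: Regex::new(r"^@irc_.*:example\.org$").unwrap(),
    };
    let members = vec![
        "@alice:example.org".to_owned(),
        "@irc_bob:example.org".to_owned(),
    ];
    assert!(appservice_in_room(&members, &info));
    println!("appservice {} has a user in the room", info.id);
}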
View file
@ -16,6 +16,7 @@ use self::data::StateDiff;
pub struct Service {
pub db: &'static dyn Data,
#[allow(clippy::type_complexity)]
pub stateinfo_cache: Mutex<
LruCache<
u64,
@ -33,6 +34,7 @@ pub type CompressedStateEvent = [u8; 2 * size_of::<u64>()];
impl Service {
/// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer.
#[allow(clippy::type_complexity)]
#[tracing::instrument(skip(self))]
pub fn load_shortstatehash_info(
&self,
@ -131,6 +133,7 @@ impl Service {
/// * `statediffremoved` - Removed from base. Each vec is shortstatekey+shorteventid
/// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer
/// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer
#[allow(clippy::type_complexity)]
#[tracing::instrument(skip(
self,
statediffnew,
@ -164,7 +167,7 @@ impl Service {
for removed in statediffremoved.iter() {
if !parent_new.remove(removed) {
// It was not added in the parent and we removed it
parent_removed.insert(removed.clone());
parent_removed.insert(*removed);
}
// Else it was added in the parent and we removed it again. We can forget this change
}
@ -172,7 +175,7 @@ impl Service {
for new in statediffnew.iter() {
if !parent_removed.remove(new) {
// It was not touched in the parent and we added it
parent_new.insert(new.clone());
parent_new.insert(*new);
}
// Else it was removed in the parent and we added it again. We can forget this change
}
@ -217,7 +220,7 @@ impl Service {
for removed in statediffremoved.iter() {
if !parent_new.remove(removed) {
// It was not added in the parent and we removed it
parent_removed.insert(removed.clone());
parent_removed.insert(*removed);
}
// Else it was added in the parent and we removed it again. We can forget this change
}
@ -225,7 +228,7 @@ impl Service {
for new in statediffnew.iter() {
if !parent_removed.remove(new) {
// It was not touched in the parent and we added it
parent_new.insert(new.clone());
parent_new.insert(*new);
}
// Else it was removed in the parent and we added it again. We can forget this change
}
@ -253,6 +256,7 @@ impl Service {
}
/// Returns the new shortstatehash, and the state diff from the previous room state
#[allow(clippy::type_complexity)]
pub fn save_state(
&self,
room_id: &RoomId,
View file
@ -2,6 +2,7 @@ use crate::{PduEvent, Result};
use ruma::{api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, UserId};
pub trait Data: Send + Sync {
#[allow(clippy::type_complexity)]
fn threads_until<'a>(
&'a self,
user_id: &'a UserId,

View file

@ -26,7 +26,7 @@ impl Service {
self.db.threads_until(user_id, room_id, until, include)
}
pub fn add_to_thread<'a>(&'a self, root_event_id: &EventId, pdu: &PduEvent) -> Result<()> {
pub fn add_to_thread(&self, root_event_id: &EventId, pdu: &PduEvent) -> Result<()> {
let root_id = &services()
.rooms
.timeline
@ -103,7 +103,7 @@ impl Service {
}
let mut users = Vec::new();
if let Some(userids) = self.db.get_participants(&root_id)? {
if let Some(userids) = self.db.get_participants(root_id)? {
users.extend_from_slice(&userids);
users.push(pdu.sender.clone());
} else {
View file
@ -66,6 +66,7 @@ pub trait Data: Send + Sync {
/// Returns an iterator over all events and their tokens in a room that happened before the
/// event with id `until` in reverse-chronological order.
#[allow(clippy::type_complexity)]
fn pdus_until<'a>(
&'a self,
user_id: &UserId,
@ -75,6 +76,7 @@ pub trait Data: Send + Sync {
/// Returns an iterator over all events in a room that happened after the event with id `from`
/// in chronological order.
#[allow(clippy::type_complexity)]
fn pdus_after<'a>(
&'a self,
user_id: &UserId,
View file
@ -2,16 +2,12 @@ mod data;
use std::{
cmp::Ordering,
collections::{BTreeMap, HashMap},
};
use std::{
collections::HashSet,
sync::{Arc, Mutex, RwLock},
collections::{BTreeMap, HashMap, HashSet},
sync::Arc,
};
pub use data::Data;
use regex::Regex;
use ruma::{
api::{client::error::ErrorKind, federation},
canonical_json::to_canonical_value,
@ -25,19 +21,21 @@ use ruma::{
},
push::{Action, Ruleset, Tweak},
serde::Base64,
state_res,
state_res::{Event, RoomVersion},
state_res::{self, Event, RoomVersion},
uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId,
OwnedServerName, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId,
OwnedServerName, RoomId, RoomVersionId, ServerName, UserId,
};
use serde::Deserialize;
use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
use tokio::sync::MutexGuard;
use tokio::sync::{Mutex, MutexGuard, RwLock};
use tracing::{error, info, warn};
use crate::{
api::server_server,
service::pdu::{EventHash, PduBuilder},
service::{
appservice::NamespaceRegex,
pdu::{EventHash, PduBuilder},
},
services, utils, Error, PduEvent, Result,
};
@ -58,8 +56,8 @@ impl PduCount {
}
pub fn try_from_string(token: &str) -> Result<Self> {
if token.starts_with('-') {
token[1..].parse().map(PduCount::Backfilled)
if let Some(stripped) = token.strip_prefix('-') {
stripped.parse().map(PduCount::Backfilled)
} else {
token.parse().map(PduCount::Normal)
}
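// strip_prefix replaces the starts_with('-') + token[1..] pair above: the
// check and the slice become a single step that cannot drift apart, and a
// missing prefix yields None instead of relying on the earlier test. Minimal
// stand-in for the PduCount token parsing:
#[derive(Debug, PartialEq)]
enum PduCount {
    Backfilled(u64),
    Normal(u64),
}

fn try_from_string(token: &str) -> Result<PduCount, std::num::ParseIntError> {
    if let Some(stripped) = token.strip_prefix('-') {
        stripped.parse().map(PduCount::Backfilled)
    } else {
        token.parse().map(PduCount::Normal)
    }
}

fn main() {
    assert_eq!(try_from_string("-5"), Ok(PduCount::Backfilled(5)));
    assert_eq!(try_from_string("5"), Ok(PduCount::Normal(5)));
    assert!(try_from_string("abc").is_err());
}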
@ -90,18 +88,6 @@ impl Ord for PduCount {
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn comparisons() {
assert!(PduCount::Normal(1) < PduCount::Normal(2));
assert!(PduCount::Backfilled(2) < PduCount::Backfilled(1));
assert!(PduCount::Normal(1) > PduCount::Backfilled(1));
assert!(PduCount::Backfilled(1) < PduCount::Normal(1));
}
}
pub struct Service {
pub db: &'static dyn Data,
@ -112,7 +98,7 @@ pub struct Service {
impl Service {
#[tracing::instrument(skip(self))]
pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result<Option<Arc<PduEvent>>> {
self.all_pdus(&user_id!("@doesntmatter:conduit.rs"), &room_id)?
self.all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id)?
.next()
.map(|o| o.map(|(_, p)| Arc::new(p)))
.transpose()
@ -213,7 +199,7 @@ impl Service {
///
/// Returns pdu id
#[tracing::instrument(skip(self, pdu, pdu_json, leaves))]
pub fn append_pdu<'a>(
pub async fn append_pdu<'a>(
&self,
pdu: &PduEvent,
mut pdu_json: CanonicalJsonObject,
@ -275,11 +261,11 @@ impl Service {
.globals
.roomid_mutex_insert
.write()
.unwrap()
.await
.entry(pdu.room_id.clone())
.or_default(),
);
let insert_lock = mutex_insert.lock().unwrap();
let insert_lock = mutex_insert.lock().await;
let count1 = services().globals.next_count()?;
// Mark as read first so the sending client doesn't get a notification even if appending
@ -320,12 +306,25 @@ impl Service {
let mut notifies = Vec::new();
let mut highlights = Vec::new();
for user in services()
let mut push_target = services()
.rooms
.state_cache
.get_our_real_users(&pdu.room_id)?
.iter()
{
.get_our_real_users(&pdu.room_id)?;
if pdu.kind == TimelineEventType::RoomMember {
if let Some(state_key) = &pdu.state_key {
let target_user_id = UserId::parse(state_key.clone())
.expect("This state_key was previously validated");
if !push_target.contains(&target_user_id) {
let mut target = push_target.as_ref().clone();
target.insert(target_user_id);
push_target = Arc::new(target);
}
}
}
for user in push_target.iter() {
// Don't notify the user of their own events
if user == &pdu.sender {
continue;
@ -418,7 +417,7 @@ impl Service {
.spaces
.roomid_spacechunk_cache
.lock()
.unwrap()
.await
.remove(&pdu.room_id);
}
}
@ -471,26 +470,22 @@ impl Service {
.search
.index_pdu(shortroomid, &pdu_id, &body)?;
let admin_room = services().rooms.alias.resolve_local_alias(
<&RoomAliasId>::try_from(
format!("#admins:{}", services().globals.server_name()).as_str(),
)
.expect("#admins:server_name is a valid room alias"),
)?;
let server_user = format!("@conduit:{}", services().globals.server_name());
let to_conduit = body.starts_with(&format!("{server_user}: "))
|| body.starts_with(&format!("{server_user} "))
|| body == format!("{server_user}:")
|| body == format!("{server_user}");
|| body == server_user;
// This will evaluate to false if the emergency password is set up so that
// the administrator can execute commands as conduit
let from_conduit = pdu.sender == server_user
&& services().globals.emergency_password().is_none();
if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) {
services().admin.process_message(body);
if let Some(admin_room) = services().admin.get_admin_room()? {
if to_conduit && !from_conduit && admin_room == pdu.room_id {
services().admin.process_message(body);
}
}
}
}
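// The last arm of to_conduit changed from `body == format!("{server_user}")`
// to a direct comparison with the string itself. A sketch of the whole
// mention check, runnable in isolation:
fn is_addressed_to(body: &str, server_user: &str) -> bool {
    body.starts_with(&format!("{server_user}: "))
        || body.starts_with(&format!("{server_user} "))
        || body == format!("{server_user}:")
        || body == server_user
}

fn main() {
    let su = "@conduit:example.org";
    assert!(is_addressed_to("@conduit:example.org: help", su));
    assert!(is_addressed_to("@conduit:example.org", su));
    assert!(!is_addressed_to("hello @conduit:example.org", su));
}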
@ -553,15 +548,15 @@ impl Service {
}
}
for appservice in services().appservice.all()? {
for appservice in services().appservice.read().await.values() {
if services()
.rooms
.state_cache
.appservice_in_room(&pdu.room_id, &appservice)?
.appservice_in_room(&pdu.room_id, appservice)?
{
services()
.sending
.send_pdu_appservice(appservice.0, pdu_id.clone())?;
.send_pdu_appservice(appservice.registration.id.clone(), pdu_id.clone())?;
continue;
}
@ -573,73 +568,41 @@ impl Service {
.as_ref()
.and_then(|state_key| UserId::parse(state_key.as_str()).ok())
{
if let Some(appservice_uid) = appservice
.1
.get("sender_localpart")
.and_then(|string| string.as_str())
.and_then(|string| {
UserId::parse_with_server_name(string, services().globals.server_name())
.ok()
})
{
if state_key_uid == &appservice_uid {
services()
.sending
.send_pdu_appservice(appservice.0, pdu_id.clone())?;
continue;
}
let appservice_uid = appservice.registration.sender_localpart.as_str();
if state_key_uid == appservice_uid {
services().sending.send_pdu_appservice(
appservice.registration.id.clone(),
pdu_id.clone(),
)?;
continue;
}
}
}
if let Some(namespaces) = appservice.1.get("namespaces") {
let users = namespaces
.get("users")
.and_then(|users| users.as_sequence())
.map_or_else(Vec::new, |users| {
users
.iter()
.filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok())
.collect::<Vec<_>>()
});
let aliases = namespaces
.get("aliases")
.and_then(|aliases| aliases.as_sequence())
.map_or_else(Vec::new, |aliases| {
aliases
.iter()
.filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok())
.collect::<Vec<_>>()
});
let rooms = namespaces
.get("rooms")
.and_then(|rooms| rooms.as_sequence());
let matching_users = |users: &NamespaceRegex| {
appservice.users.is_match(pdu.sender.as_str())
|| pdu.kind == TimelineEventType::RoomMember
&& pdu
.state_key
.as_ref()
.map_or(false, |state_key| users.is_match(state_key))
};
let matching_aliases = |aliases: &NamespaceRegex| {
services()
.rooms
.alias
.local_aliases_for_room(&pdu.room_id)
.filter_map(|r| r.ok())
.any(|room_alias| aliases.is_match(room_alias.as_str()))
};
let matching_users = |users: &Regex| {
users.is_match(pdu.sender.as_str())
|| pdu.kind == TimelineEventType::RoomMember
&& pdu
.state_key
.as_ref()
.map_or(false, |state_key| users.is_match(state_key))
};
let matching_aliases = |aliases: &Regex| {
services()
.rooms
.alias
.local_aliases_for_room(&pdu.room_id)
.filter_map(|r| r.ok())
.any(|room_alias| aliases.is_match(room_alias.as_str()))
};
if aliases.iter().any(matching_aliases)
|| rooms.map_or(false, |rooms| rooms.contains(&pdu.room_id.as_str().into()))
|| users.iter().any(matching_users)
{
services()
.sending
.send_pdu_appservice(appservice.0, pdu_id.clone())?;
}
if matching_aliases(&appservice.aliases)
|| appservice.rooms.is_match(pdu.room_id.as_str())
|| matching_users(&appservice.users)
{
services()
.sending
.send_pdu_appservice(appservice.registration.id.clone(), pdu_id.clone())?;
}
}
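// The YAML-walking namespace checks above collapse into precompiled matchers.
// A hedged sketch of a NamespaceRegex-like type (conduit's real one may
// differ): an optional RegexSet, where an empty namespace matches nothing.
use regex::RegexSet;

struct NamespaceRegex(Option<RegexSet>);

impl NamespaceRegex {
    fn new(patterns: &[&str]) -> Self {
        if patterns.is_empty() {
            NamespaceRegex(None)
        } else {
            NamespaceRegex(Some(RegexSet::new(patterns).expect("valid regexes")))
        }
    }
    fn is_match(&self, s: &str) -> bool {
        self.0.as_ref().map_or(false, |set| set.is_match(s))
    }
}

fn main() {
    let users = NamespaceRegex::new(&[r"^@irc_.*:example\.org$"]);
    let rooms = NamespaceRegex::new(&[]);
    assert!(users.is_match("@irc_alice:example.org"));
    assert!(!rooms.is_match("!any:example.org")); // empty namespace never matches
}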
@ -829,7 +792,7 @@ impl Service {
/// Creates a new persisted data unit and adds it to a room. This function takes a
/// roomid_mutex_state, meaning that only this function is able to mutate the room state.
#[tracing::instrument(skip(self, state_lock))]
pub fn build_and_append_pdu(
pub async fn build_and_append_pdu(
&self,
pdu_builder: PduBuilder,
sender: &UserId,
@ -839,89 +802,85 @@ impl Service {
let (pdu, pdu_json) =
self.create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock)?;
let admin_room = services().rooms.alias.resolve_local_alias(
<&RoomAliasId>::try_from(
format!("#admins:{}", services().globals.server_name()).as_str(),
)
.expect("#admins:server_name is a valid room alias"),
)?;
if admin_room.filter(|v| v == room_id).is_some() {
match pdu.event_type() {
TimelineEventType::RoomEncryption => {
warn!("Encryption is not allowed in the admins room");
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Encryption is not allowed in the admins room.",
));
if let Some(admin_room) = services().admin.get_admin_room()? {
if admin_room == room_id {
match pdu.event_type() {
TimelineEventType::RoomEncryption => {
warn!("Encryption is not allowed in the admins room");
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Encryption is not allowed in the admins room.",
));
}
TimelineEventType::RoomMember => {
#[derive(Deserialize)]
struct ExtractMembership {
membership: MembershipState,
}
let target = pdu
.state_key()
.filter(|v| v.starts_with('@'))
.unwrap_or(sender.as_str());
let server_name = services().globals.server_name();
let server_user = format!("@conduit:{}", server_name);
let content = serde_json::from_str::<ExtractMembership>(pdu.content.get())
.map_err(|_| Error::bad_database("Invalid content in pdu."))?;
if content.membership == MembershipState::Leave {
if target == server_user {
warn!("Conduit user cannot leave from admins room");
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Conduit user cannot leave from admins room.",
));
}
let count = services()
.rooms
.state_cache
.room_members(room_id)
.filter_map(|m| m.ok())
.filter(|m| m.server_name() == server_name)
.filter(|m| m != target)
.count();
if count < 2 {
warn!("Last admin cannot leave from admins room");
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Last admin cannot leave from admins room.",
));
}
}
if content.membership == MembershipState::Ban && pdu.state_key().is_some() {
if target == server_user {
warn!("Conduit user cannot be banned in admins room");
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Conduit user cannot be banned in admins room.",
));
}
let count = services()
.rooms
.state_cache
.room_members(room_id)
.filter_map(|m| m.ok())
.filter(|m| m.server_name() == server_name)
.filter(|m| m != target)
.count();
if count < 2 {
warn!("Last admin cannot be banned in admins room");
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Last admin cannot be banned in admins room.",
));
}
}
}
_ => {}
}
TimelineEventType::RoomMember => {
#[derive(Deserialize)]
struct ExtractMembership {
membership: MembershipState,
}
let target = pdu
.state_key()
.filter(|v| v.starts_with("@"))
.unwrap_or(sender.as_str());
let server_name = services().globals.server_name();
let server_user = format!("@conduit:{}", server_name);
let content = serde_json::from_str::<ExtractMembership>(pdu.content.get())
.map_err(|_| Error::bad_database("Invalid content in pdu."))?;
if content.membership == MembershipState::Leave {
if target == &server_user {
warn!("Conduit user cannot leave from admins room");
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Conduit user cannot leave from admins room.",
));
}
let count = services()
.rooms
.state_cache
.room_members(room_id)
.filter_map(|m| m.ok())
.filter(|m| m.server_name() == server_name)
.filter(|m| m != target)
.count();
if count < 2 {
warn!("Last admin cannot leave from admins room");
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Last admin cannot leave from admins room.",
));
}
}
if content.membership == MembershipState::Ban && pdu.state_key().is_some() {
if target == &server_user {
warn!("Conduit user cannot be banned in admins room");
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Conduit user cannot be banned in admins room.",
));
}
let count = services()
.rooms
.state_cache
.room_members(room_id)
.filter_map(|m| m.ok())
.filter(|m| m.server_name() == server_name)
.filter(|m| m != target)
.count();
if count < 2 {
warn!("Last admin cannot be banned in admins room");
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Last admin cannot be banned in admins room.",
));
}
}
}
_ => {}
}
}
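// Sketch of the "last admin cannot leave / be banned" guard above: count the
// local members of the admin room other than the target, and refuse the
// membership change when fewer than two would remain (the @conduit server
// user plus at least one human admin). Simplified membership model:
fn remaining_local_members(members: &[&str], server: &str, target: &str) -> usize {
    members
        .iter()
        .filter(|m| m.ends_with(&format!(":{server}")))
        .filter(|m| **m != target)
        .count()
}

fn main() {
    let members = ["@conduit:example.org", "@admin:example.org"];
    let count = remaining_local_members(&members, "example.org", "@admin:example.org");
    // Only @conduit would remain, so the real check (count < 2) rejects this.
    assert!(count < 2);
}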
@ -929,14 +888,16 @@ impl Service {
// pdu without its state. This is okay because append_pdu can't fail.
let statehashid = services().rooms.state.append_to_state(&pdu)?;
let pdu_id = self.append_pdu(
&pdu,
pdu_json,
// Since this PDU references all pdu_leaves we can update the leaves
// of the room
vec![(*pdu.event_id).to_owned()],
state_lock,
)?;
let pdu_id = self
.append_pdu(
&pdu,
pdu_json,
// Since this PDU references all pdu_leaves we can update the leaves
// of the room
vec![(*pdu.event_id).to_owned()],
state_lock,
)
.await?;
// We set the room state after inserting the pdu, so that we never have a moment in time
// where events in the current room state do not exist
@ -974,7 +935,7 @@ impl Service {
/// Append the incoming event setting the state snapshot to the state from the
/// server that sent the event.
#[tracing::instrument(skip_all)]
pub fn append_incoming_pdu<'a>(
pub async fn append_incoming_pdu<'a>(
&self,
pdu: &PduEvent,
pdu_json: CanonicalJsonObject,
@ -1004,11 +965,11 @@ impl Service {
return Ok(None);
}
let pdu_id =
services()
.rooms
.timeline
.append_pdu(pdu, pdu_json, new_room_leaves, state_lock)?;
let pdu_id = services()
.rooms
.timeline
.append_pdu(pdu, pdu_json, new_room_leaves, state_lock)
.await?;
Ok(Some(pdu_id))
}
@ -1072,7 +1033,7 @@ impl Service {
#[tracing::instrument(skip(self, room_id))]
pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Result<()> {
let first_pdu = self
.all_pdus(&user_id!("@doesntmatter:conduit.rs"), &room_id)?
.all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id)?
.next()
.expect("Room is not empty")?;
@ -1084,7 +1045,7 @@ impl Service {
let power_levels: RoomPowerLevelsEventContent = services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomPowerLevels, "")?
.room_state_get(room_id, &StateEventType::RoomPowerLevels, "")?
.map(|ev| {
serde_json::from_str(ev.content.get())
.map_err(|_| Error::bad_database("invalid m.room.power_levels event"))
@ -1115,11 +1076,9 @@ impl Service {
.await;
match response {
Ok(response) => {
let mut pub_key_map = RwLock::new(BTreeMap::new());
let pub_key_map = RwLock::new(BTreeMap::new());
for pdu in response.pdus {
if let Err(e) = self
.backfill_pdu(backfill_server, pdu, &mut pub_key_map)
.await
if let Err(e) = self.backfill_pdu(backfill_server, pdu, &pub_key_map).await
{
warn!("Failed to add backfilled pdu: {e}");
}
@ -1151,7 +1110,7 @@ impl Service {
.globals
.roomid_mutex_federation
.write()
.unwrap()
.await
.entry(room_id.to_owned())
.or_default(),
);
@ -1166,7 +1125,7 @@ impl Service {
services()
.rooms
.event_handler
.handle_incoming_pdu(origin, &event_id, &room_id, value, false, &pub_key_map)
.handle_incoming_pdu(origin, &event_id, &room_id, value, false, pub_key_map)
.await?;
let value = self.get_pdu_json(&event_id)?.expect("We just created it");
@ -1183,11 +1142,11 @@ impl Service {
.globals
.roomid_mutex_insert
.write()
.unwrap()
.await
.entry(room_id.clone())
.or_default(),
);
let insert_lock = mutex_insert.lock().unwrap();
let insert_lock = mutex_insert.lock().await;
let count = services().globals.next_count()?;
let mut pdu_id = shortroomid.to_be_bytes().to_vec();
@ -1199,24 +1158,21 @@ impl Service {
drop(insert_lock);
match pdu.kind {
TimelineEventType::RoomMessage => {
#[derive(Deserialize)]
struct ExtractBody {
body: Option<String>,
}
let content = serde_json::from_str::<ExtractBody>(pdu.content.get())
.map_err(|_| Error::bad_database("Invalid content in pdu."))?;
if let Some(body) = content.body {
services()
.rooms
.search
.index_pdu(shortroomid, &pdu_id, &body)?;
}
if pdu.kind == TimelineEventType::RoomMessage {
#[derive(Deserialize)]
struct ExtractBody {
body: Option<String>,
}
let content = serde_json::from_str::<ExtractBody>(pdu.content.get())
.map_err(|_| Error::bad_database("Invalid content in pdu."))?;
if let Some(body) = content.body {
services()
.rooms
.search
.index_pdu(shortroomid, &pdu_id, &body)?;
}
_ => {}
}
drop(mutex_lock);
@ -1224,3 +1180,16 @@ impl Service {
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn comparisons() {
assert!(PduCount::Normal(1) < PduCount::Normal(2));
assert!(PduCount::Backfilled(2) < PduCount::Backfilled(1));
assert!(PduCount::Normal(1) > PduCount::Backfilled(1));
assert!(PduCount::Backfilled(1) < PduCount::Normal(1));
}
}
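// The tests above (moved to the end of the file by this commit) pin down
// PduCount's ordering: backfilled events sort before all normal ones, and
// larger backfill distances sort earlier. A hedged sketch of an Ord impl with
// that behavior (conduit's real impl may differ in detail):
use std::cmp::Ordering;

#[derive(Debug, PartialEq, Eq, Clone, Copy)]
enum PduCount {
    Backfilled(u64),
    Normal(u64),
}

impl Ord for PduCount {
    fn cmp(&self, other: &Self) -> Ordering {
        match (self, other) {
            (PduCount::Normal(a), PduCount::Normal(b)) => a.cmp(b),
            // Larger backfill counts lie further in the past.
            (PduCount::Backfilled(a), PduCount::Backfilled(b)) => b.cmp(a),
            (PduCount::Normal(_), PduCount::Backfilled(_)) => Ordering::Greater,
            (PduCount::Backfilled(_), PduCount::Normal(_)) => Ordering::Less,
        }
    }
}

impl PartialOrd for PduCount {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

fn main() {
    assert!(PduCount::Normal(1) < PduCount::Normal(2));
    assert!(PduCount::Backfilled(2) < PduCount::Backfilled(1));
    assert!(PduCount::Normal(1) > PduCount::Backfilled(1));
}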
View file
@ -5,6 +5,7 @@ use crate::Result;
use super::{OutgoingKind, SendingEventType};
pub trait Data: Send + Sync {
#[allow(clippy::type_complexity)]
fn active_requests<'a>(
&'a self,
) -> Box<dyn Iterator<Item = Result<(Vec<u8>, OutgoingKind, SendingEventType)>> + 'a>;
View file
@ -22,7 +22,7 @@ use base64::{engine::general_purpose, Engine as _};
use ruma::{
api::{
appservice,
appservice::{self, Registration},
federation::{
self,
transactions::edu::{
@ -131,7 +131,7 @@ impl Service {
for (key, outgoing_kind, event) in self.db.active_requests().filter_map(|r| r.ok()) {
let entry = initial_transactions
.entry(outgoing_kind.clone())
.or_insert_with(Vec::new);
.or_default();
if entry.len() > 30 {
warn!(
@ -484,11 +484,11 @@ impl Service {
let permit = services().sending.maximum_requests.acquire().await;
let response = appservice_server::send_request(
let response = match appservice_server::send_request(
services()
.appservice
.get_registration(id)
.map_err(|e| (kind.clone(), e))?
.await
.ok_or_else(|| {
(
kind.clone(),
@ -511,8 +511,10 @@ impl Service {
},
)
.await
.map(|_response| kind.clone())
.map_err(|e| (kind, e));
{
Ok(_) => Ok(kind.clone()),
Err(e) => Err((kind.clone(), e)),
};
drop(permit);
@ -698,12 +700,15 @@ impl Service {
response
}
/// Sends a request to an appservice
///
/// Only returns None if there is no url specified in the appservice registration file
#[tracing::instrument(skip(self, registration, request))]
pub async fn send_appservice_request<T: OutgoingRequest>(
&self,
registration: serde_yaml::Value,
registration: Registration,
request: T,
) -> Result<T::IncomingResponse>
) -> Result<Option<T::IncomingResponse>>
where
T: Debug,
{
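// send_appservice_request now returns Result<Option<_>>: per the doc comment
// above, None means the registration has no `url`, so there is nowhere to
// deliver the request. A minimal stand-in (hypothetical types, no real HTTP):
struct Registration {
    id: String,
    url: Option<String>,
}

async fn send_appservice_request(reg: &Registration, body: &str) -> Result<Option<String>, String> {
    let Some(url) = &reg.url else {
        return Ok(None); // no url configured: skip delivery instead of erroring
    };
    // A real implementation would perform an HTTP request here.
    Ok(Some(format!("delivered {body} to {url} for {}", reg.id)))
}

#[tokio::main]
async fn main() {
    let no_url = Registration { id: "bot".to_owned(), url: None };
    assert!(send_appservice_request(&no_url, "txn").await.unwrap().is_none());
}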
View file
@ -34,6 +34,7 @@ pub struct SlidingSyncCache {
pub struct Service {
pub db: &'static dyn Data,
#[allow(clippy::type_complexity)]
pub connections:
Mutex<BTreeMap<(OwnedUserId, OwnedDeviceId, String), Arc<Mutex<SlidingSyncCache>>>>,
}
@ -137,12 +138,18 @@ impl Service {
cached.lists.insert(list_id.clone(), list.clone());
}
cached
.subscriptions
.extend(request.room_subscriptions.clone().into_iter());
request
.room_subscriptions
.extend(cached.subscriptions.clone().into_iter());
cached.subscriptions.extend(
request
.room_subscriptions
.iter()
.map(|(k, v)| (k.clone(), v.clone())),
);
request.room_subscriptions.extend(
cached
.subscriptions
.iter()
.map(|(k, v)| (k.clone(), v.clone())),
);
request.extensions.e2ee.enabled = request
.extensions