fix cargo clippy lints

Charles Hall 2023-12-23 19:48:14 -08:00
parent 92c5b6b86c
commit ab1fff2642
No known key found for this signature in database
GPG key ID: 7B8E0645816E07CF
31 changed files with 205 additions and 205 deletions

View file

@@ -400,7 +400,7 @@ pub async fn get_member_events_route(
     if !services()
         .rooms
         .state_accessor
-        .user_can_see_state_events(&sender_user, &body.room_id)?
+        .user_can_see_state_events(sender_user, &body.room_id)?
     {
         return Err(Error::BadRequest(
             ErrorKind::Forbidden,
@@ -435,7 +435,7 @@ pub async fn joined_members_route(
     if !services()
         .rooms
         .state_accessor
-        .user_can_see_state_events(&sender_user, &body.room_id)?
+        .user_can_see_state_events(sender_user, &body.room_id)?
     {
         return Err(Error::BadRequest(
             ErrorKind::Forbidden,
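
Most hunks in this commit are the same one-character fix: `sender_user` already has type `&UserId`, so writing `&sender_user` produces a `&&UserId` that the compiler immediately dereferences again. Clippy reports this as `needless_borrow`. A minimal sketch of the pattern, with illustrative names rather than Conduit's real API:

    fn user_can_see(user: &str) -> bool {
        // Stand-in for a real visibility check.
        !user.is_empty()
    }

    fn main() {
        let sender_user: &str = "@alice:example.org";
        // clippy::needless_borrow: `&sender_user` is a `&&str` that deref
        // coercion collapses right back to `&str`.
        let _ = user_can_see(&sender_user);
        // Fixed: pass the existing reference through unchanged.
        let _ = user_can_see(sender_user);
    }

The same change repeats throughout the commit for `try_from_string(&t)`, `&room_id`, `&sender_device`, and similar arguments.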
@@ -712,7 +712,7 @@ async fn join_room_by_id_helper(
         }

         info!("Running send_join auth check");
-        if !state_res::event_auth::auth_check(
+        let authenticated = state_res::event_auth::auth_check(
             &state_res::RoomVersion::new(&room_version_id).expect("room version is supported"),
             &parsed_join_pdu,
             None::<PduEvent>, // TODO: third party invite
@@ -735,7 +735,9 @@ async fn join_room_by_id_helper(
         .map_err(|e| {
             warn!("Auth check failed: {e}");
             Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")
-        })? {
+        })?;
+
+        if !authenticated {
             return Err(Error::BadRequest(
                 ErrorKind::InvalidParam,
                 "Auth check failed",

View file

@@ -124,7 +124,7 @@ pub async fn get_message_events_route(
     let to = body
         .to
         .as_ref()
-        .and_then(|t| PduCount::try_from_string(&t).ok());
+        .and_then(|t| PduCount::try_from_string(t).ok());

     services().rooms.lazy_loading.lazy_load_confirm_delivery(
         sender_user,

View file

@@ -23,7 +23,7 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route(
     let to = body
         .to
         .as_ref()
-        .and_then(|t| PduCount::try_from_string(&t).ok());
+        .and_then(|t| PduCount::try_from_string(t).ok());

     // Use limit or else 10, with maximum 100
     let limit = body
@@ -73,7 +73,7 @@ pub async fn get_relating_events_with_rel_type_route(
     let to = body
         .to
         .as_ref()
-        .and_then(|t| PduCount::try_from_string(&t).ok());
+        .and_then(|t| PduCount::try_from_string(t).ok());

     // Use limit or else 10, with maximum 100
     let limit = body
@@ -121,7 +121,7 @@ pub async fn get_relating_events_route(
     let to = body
         .to
         .as_ref()
-        .and_then(|t| PduCount::try_from_string(&t).ok());
+        .and_then(|t| PduCount::try_from_string(t).ok());

     // Use limit or else 10, with maximum 100
     let limit = body

View file

@@ -117,12 +117,10 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
             } else {
                 return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
             };
-            let user_id =
-                UserId::parse_with_server_name(username, services().globals.server_name())
-                    .map_err(|_| {
-                        Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
-                    })?;
-            user_id
+            UserId::parse_with_server_name(username, services().globals.server_name()).map_err(
+                |_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."),
+            )?
         }
         _ => {
             warn!("Unsupported or unknown login type: {:?}", &body.login_info);

View file

@@ -85,7 +85,7 @@ pub async fn get_state_events_route(
     if !services()
         .rooms
         .state_accessor
-        .user_can_see_state_events(&sender_user, &body.room_id)?
+        .user_can_see_state_events(sender_user, &body.room_id)?
     {
         return Err(Error::BadRequest(
             ErrorKind::Forbidden,
@@ -118,7 +118,7 @@ pub async fn get_state_events_for_key_route(
     if !services()
         .rooms
         .state_accessor
-        .user_can_see_state_events(&sender_user, &body.room_id)?
+        .user_can_see_state_events(sender_user, &body.room_id)?
     {
         return Err(Error::BadRequest(
             ErrorKind::Forbidden,
@@ -157,7 +157,7 @@ pub async fn get_state_events_for_empty_key_route(
     if !services()
         .rooms
         .state_accessor
-        .user_can_see_state_events(&sender_user, &body.room_id)?
+        .user_can_see_state_events(sender_user, &body.room_id)?
     {
         return Err(Error::BadRequest(
             ErrorKind::Forbidden,

View file

@@ -554,6 +554,7 @@ async fn sync_helper(
         }
     }

+#[allow(clippy::too_many_arguments)]
 async fn load_joined_room(
     sender_user: &UserId,
     sender_device: &DeviceId,
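
`load_joined_room` keeps its long parameter list; rather than bundling the arguments into a struct, the commit opts out of `clippy::too_many_arguments`, which by default fires once a function takes more than seven parameters. An illustrative sketch:

    // Either group the parameters into a struct, or silence the lint per
    // function, as the commit does for this internal helper:
    #[allow(clippy::too_many_arguments)]
    fn load_room(a: u8, b: u8, c: u8, d: u8, e: u8, f: u8, g: u8, h: u8) -> u8 {
        a + b + c + d + e + f + g + h
    }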
@@ -590,7 +591,7 @@ async fn load_joined_room(
         || services()
             .rooms
             .user
-            .last_notification_read(&sender_user, &room_id)?
+            .last_notification_read(sender_user, room_id)?
             > since;

     let mut timeline_users = HashSet::new();
@@ -599,16 +600,16 @@ async fn load_joined_room(
     }

     services().rooms.lazy_loading.lazy_load_confirm_delivery(
-        &sender_user,
-        &sender_device,
-        &room_id,
+        sender_user,
+        sender_device,
+        room_id,
         sincecount,
     )?;

     // Database queries:
     let current_shortstatehash =
-        if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? {
+        if let Some(s) = services().rooms.state.get_room_shortstatehash(room_id)? {
             s
         } else {
             error!("Room {} has no state", room_id);
@@ -618,7 +619,7 @@ async fn load_joined_room(
     let since_shortstatehash = services()
         .rooms
         .user
-        .get_token_shortstatehash(&room_id, since)?;
+        .get_token_shortstatehash(room_id, since)?;

     let (heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events) =
         if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) {
@@ -630,12 +631,12 @@ async fn load_joined_room(
             let joined_member_count = services()
                 .rooms
                 .state_cache
-                .room_joined_count(&room_id)?
+                .room_joined_count(room_id)?
                 .unwrap_or(0);

             let invited_member_count = services()
                 .rooms
                 .state_cache
-                .room_invited_count(&room_id)?
+                .room_invited_count(room_id)?
                 .unwrap_or(0);
             // Recalculate heroes (first 5 members)
@@ -648,7 +649,7 @@ async fn load_joined_room(
             for hero in services()
                 .rooms
                 .timeline
-                .all_pdus(&sender_user, &room_id)?
+                .all_pdus(sender_user, room_id)?
                 .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus
                 .filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember)
                 .map(|(_, pdu)| {
@@ -669,11 +670,11 @@ async fn load_joined_room(
                     ) && (services()
                         .rooms
                         .state_cache
-                        .is_joined(&user_id, &room_id)?
+                        .is_joined(&user_id, room_id)?
                         || services()
                             .rooms
                             .state_cache
-                            .is_invited(&user_id, &room_id)?)
+                            .is_invited(&user_id, room_id)?)
                     {
                         Ok::<_, Error>(Some(state_key.clone()))
                     } else {
@@ -789,17 +790,17 @@ async fn load_joined_room(

             // Reset lazy loading because this is an initial sync
             services().rooms.lazy_loading.lazy_load_reset(
-                &sender_user,
-                &sender_device,
-                &room_id,
+                sender_user,
+                sender_device,
+                room_id,
             )?;

             // The state_events above should contain all timeline_users, let's mark them as lazy
             // loaded.
             services().rooms.lazy_loading.lazy_load_mark_sent(
-                &sender_user,
-                &sender_device,
-                &room_id,
+                sender_user,
+                sender_device,
+                room_id,
                 lazy_loaded,
                 next_batchcount,
             );
@@ -866,14 +867,14 @@ async fn load_joined_room(
             }

             if !services().rooms.lazy_loading.lazy_load_was_sent_before(
-                &sender_user,
-                &sender_device,
-                &room_id,
+                sender_user,
+                sender_device,
+                room_id,
                 &event.sender,
             )? || lazy_load_send_redundant
             {
                 if let Some(member_event) = services().rooms.state_accessor.room_state_get(
-                    &room_id,
+                    room_id,
                     &StateEventType::RoomMember,
                     event.sender.as_str(),
                 )? {
@@ -884,9 +885,9 @@ async fn load_joined_room(
             }

             services().rooms.lazy_loading.lazy_load_mark_sent(
-                &sender_user,
-                &sender_device,
-                &room_id,
+                sender_user,
+                sender_device,
+                room_id,
                 lazy_loaded,
                 next_batchcount,
             );
@@ -934,7 +935,7 @@ async fn load_joined_room(
                     match new_membership {
                         MembershipState::Join => {
                             // A new user joined an encrypted room
-                            if !share_encrypted_room(&sender_user, &user_id, &room_id)? {
+                            if !share_encrypted_room(sender_user, &user_id, room_id)? {
                                 device_list_updates.insert(user_id);
                             }
                         }
@@ -954,15 +955,15 @@ async fn load_joined_room(
                 services()
                     .rooms
                     .state_cache
-                    .room_members(&room_id)
+                    .room_members(room_id)
                     .flatten()
                     .filter(|user_id| {
                         // Don't send key updates from the sender to the sender
-                        &sender_user != user_id
+                        sender_user != user_id
                     })
                     .filter(|user_id| {
                         // Only send keys if the sender doesn't share an encrypted room with the target already
-                        !share_encrypted_room(&sender_user, user_id, &room_id)
+                        !share_encrypted_room(sender_user, user_id, room_id)
                             .unwrap_or(false)
                     }),
             );
@@ -997,7 +998,7 @@ async fn load_joined_room(
             services()
                 .rooms
                 .user
-                .notification_count(&sender_user, &room_id)?
+                .notification_count(sender_user, room_id)?
                 .try_into()
                 .expect("notification count can't go that high"),
         )
@@ -1010,7 +1011,7 @@ async fn load_joined_room(
             services()
                 .rooms
                 .user
-                .highlight_count(&sender_user, &room_id)?
+                .highlight_count(sender_user, room_id)?
                 .try_into()
                 .expect("highlight count can't go that high"),
         )
@@ -1039,15 +1040,15 @@ async fn load_joined_room(
         .rooms
         .edus
         .read_receipt
-        .readreceipts_since(&room_id, since)
+        .readreceipts_since(room_id, since)
         .filter_map(|r| r.ok()) // Filter out buggy events
         .map(|(_, _, v)| v)
         .collect();

-    if services().rooms.edus.typing.last_typing_update(&room_id)? > since {
+    if services().rooms.edus.typing.last_typing_update(room_id)? > since {
         edus.push(
             serde_json::from_str(
-                &serde_json::to_string(&services().rooms.edus.typing.typings_all(&room_id)?)
+                &serde_json::to_string(&services().rooms.edus.typing.typings_all(room_id)?)
                     .expect("event is valid, we just created it"),
             )
             .expect("event is valid, we just created it"),
@@ -1056,7 +1057,7 @@ async fn load_joined_room(

     // Save the state after this sync so we can send the correct state diff next sync
     services().rooms.user.associate_token_shortstatehash(
-        &room_id,
+        room_id,
         next_batch,
         current_shortstatehash,
     )?;
@@ -1065,7 +1066,7 @@ async fn load_joined_room(
         account_data: RoomAccountData {
             events: services()
                 .account_data
-                .changes_since(Some(&room_id), &sender_user, since)?
+                .changes_since(Some(room_id), sender_user, since)?
                 .into_iter()
                 .filter_map(|(_, v)| {
                     serde_json::from_str(v.json().get())
@@ -1110,13 +1111,13 @@ fn load_timeline(
     if services()
         .rooms
         .timeline
-        .last_timeline_count(&sender_user, &room_id)?
+        .last_timeline_count(sender_user, room_id)?
         > roomsincecount
     {
         let mut non_timeline_pdus = services()
             .rooms
             .timeline
-            .pdus_until(&sender_user, &room_id, PduCount::max())?
+            .pdus_until(sender_user, room_id, PduCount::max())?
             .filter_map(|r| {
                 // Filter out buggy events
                 if r.is_err() {
@@ -1172,7 +1173,6 @@ fn share_encrypted_room(
 pub async fn sync_events_v4_route(
     body: Ruma<sync_events::v4::Request>,
 ) -> Result<sync_events::v4::Response, RumaResponse<UiaaResponse>> {
-    dbg!(&body.body);
     let sender_user = body.sender_user.expect("user is authenticated");
     let sender_device = body.sender_device.expect("user is authenticated");
     let mut body = body.body;
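
`dbg!(&body.body)` printed every sliding-sync request body to stderr; the matching `dbg!` around the response is dropped at the end of this file. Whether this was caught by clippy's opt-in `dbg_macro` restriction lint or was simply leftover debugging, the fix is the same. Sketch:

    fn double(x: i32) -> i32 {
        // While debugging: `dbg!(x * 2)` prints `x * 2 = ...` to stderr and
        // returns the value. Shipped code returns the expression directly.
        x * 2
    }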
@@ -1232,7 +1232,7 @@ pub async fn sync_events_v4_route(

     for room_id in &all_joined_rooms {
         let current_shortstatehash =
-            if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? {
+            if let Some(s) = services().rooms.state.get_room_shortstatehash(room_id)? {
                 s
             } else {
                 error!("Room {} has no state", room_id);
@@ -1242,7 +1242,7 @@ pub async fn sync_events_v4_route(
         let since_shortstatehash = services()
             .rooms
             .user
-            .get_token_shortstatehash(&room_id, globalsince)?;
+            .get_token_shortstatehash(room_id, globalsince)?;

         let since_sender_member: Option<RoomMemberEventContent> = since_shortstatehash
             .and_then(|shortstatehash| {
@@ -1331,7 +1331,7 @@ pub async fn sync_events_v4_route(
                                 if !share_encrypted_room(
                                     &sender_user,
                                     &user_id,
-                                    &room_id,
+                                    room_id,
                                 )? {
                                     device_list_changes.insert(user_id);
                                 }
@@ -1352,7 +1352,7 @@ pub async fn sync_events_v4_route(
                 services()
                     .rooms
                     .state_cache
-                    .room_members(&room_id)
+                    .room_members(room_id)
                     .flatten()
                     .filter(|user_id| {
                         // Don't send key updates from the sender to the sender
@@ -1360,7 +1360,7 @@ pub async fn sync_events_v4_route(
                     })
                     .filter(|user_id| {
                         // Only send keys if the sender doesn't share an encrypted room with the target already
-                        !share_encrypted_room(&sender_user, user_id, &room_id)
+                        !share_encrypted_room(&sender_user, user_id, room_id)
                             .unwrap_or(false)
                     }),
             );
@@ -1451,7 +1451,7 @@ pub async fn sync_events_v4_route(
                 }
                 sync_events::v4::SyncOp {
                     op: SlidingOp::Sync,
-                    range: Some(r.clone()),
+                    range: Some(r),
                     index: None,
                     room_ids,
                     room_id: None,
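
`r` is never used again after this struct is built, so `r.clone()` cloned a value that could simply be moved; this is the shape clippy's `redundant_clone` lint targets (a nursery lint, so this may also just be review cleanup). Sketch with a made-up type:

    #[derive(Clone)]
    struct Range(u64, u64);

    fn consume(r: Range) -> u64 {
        r.0 + r.1
    }

    fn main() {
        let r = Range(1, 10);
        // Before: `consume(r.clone())` even though `r` is never touched again.
        let _ = consume(r);
    }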
@@ -1523,7 +1523,7 @@ pub async fn sync_events_v4_route(
         let roomsincecount = PduCount::Normal(*roomsince);

         let (timeline_pdus, limited) =
-            load_timeline(&sender_user, &room_id, roomsincecount, *timeline_limit)?;
+            load_timeline(&sender_user, room_id, roomsincecount, *timeline_limit)?;

         if roomsince != &0 && timeline_pdus.is_empty() {
             continue;
@@ -1555,63 +1555,58 @@ pub async fn sync_events_v4_route(
         let required_state = required_state_request
             .iter()
-            .map(|state| {
+            .flat_map(|state| {
                 services()
                     .rooms
                     .state_accessor
-                    .room_state_get(&room_id, &state.0, &state.1)
+                    .room_state_get(room_id, &state.0, &state.1)
+                    .ok()
+                    .flatten()
+                    .map(|state| state.to_sync_state_event())
             })
-            .filter_map(|r| r.ok())
-            .filter_map(|o| o)
-            .map(|state| state.to_sync_state_event())
             .collect();

         // Heroes
         let heroes = services()
             .rooms
             .state_cache
-            .room_members(&room_id)
+            .room_members(room_id)
             .filter_map(|r| r.ok())
             .filter(|member| member != &sender_user)
-            .map(|member| {
-                Ok::<_, Error>(
-                    services()
-                        .rooms
-                        .state_accessor
-                        .get_member(&room_id, &member)?
-                        .map(|memberevent| {
-                            (
-                                memberevent
-                                    .displayname
-                                    .unwrap_or_else(|| member.to_string()),
-                                memberevent.avatar_url,
-                            )
-                        }),
-                )
+            .flat_map(|member| {
+                services()
+                    .rooms
+                    .state_accessor
+                    .get_member(room_id, &member)
+                    .ok()
+                    .flatten()
+                    .map(|memberevent| {
+                        (
+                            memberevent
+                                .displayname
+                                .unwrap_or_else(|| member.to_string()),
+                            memberevent.avatar_url,
+                        )
+                    })
             })
-            .filter_map(|r| r.ok())
-            .filter_map(|o| o)
             .take(5)
             .collect::<Vec<_>>();

-        let name = if heroes.len() > 1 {
-            let last = heroes[0].0.clone();
-            Some(
-                heroes[1..]
+        let name = match &heroes[..] {
+            [] => None,
+            [only] => Some(only.0.clone()),
+            [firsts @ .., last] => Some(
+                firsts
                     .iter()
                     .map(|h| h.0.clone())
                     .collect::<Vec<_>>()
                     .join(", ")
                     + " and "
-                    + &last,
-            )
-        } else if heroes.len() == 1 {
-            Some(heroes[0].0.clone())
-        } else {
-            None
+                    + &last.0,
+            ),
         };

-        let avatar = if heroes.len() == 1 {
-            heroes[0].1.clone()
+        let avatar = if let [only] = &heroes[..] {
+            only.1.clone()
         } else {
             None
         };
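
Two cleanups land in this hunk. `.filter_map(|o| o)` over an iterator of `Option`s is clippy's `filter_map_identity` (use `.flatten()`), and here the whole `map` plus two `filter_map` stages collapse into a single `flat_map`. Separately, the `heroes.len()` branching with `heroes[0]` indexing becomes slice patterns, which express the same cases without indexing. A self-contained sketch of both, with made-up data:

    fn main() {
        let values = vec![Some(1), None, Some(3)];
        // Before: `values.into_iter().filter_map(|o| o)` (filter_map_identity).
        let flat: Vec<i32> = values.into_iter().flatten().collect();
        assert_eq!(flat, [1, 3]);

        // Slice patterns replace length checks plus indexing:
        let heroes = ["Alice", "Bob"];
        let name = match &heroes[..] {
            [] => None,
            [only] => Some((*only).to_string()),
            [firsts @ .., last] => Some(format!("{} and {}", firsts.join(", "), last)),
        };
        assert_eq!(name.as_deref(), Some("Alice and Bob"));
    }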
@@ -1619,15 +1614,11 @@ pub async fn sync_events_v4_route(
         rooms.insert(
             room_id.clone(),
             sync_events::v4::SlidingSyncRoom {
-                name: services()
-                    .rooms
-                    .state_accessor
-                    .get_name(&room_id)?
-                    .or_else(|| name),
+                name: services().rooms.state_accessor.get_name(room_id)?.or(name),
                 avatar: services()
                     .rooms
                     .state_accessor
-                    .get_avatar(&room_id)?
+                    .get_avatar(room_id)?
                     .map_or(avatar, |a| a.url),
                 initial: Some(roomsince == &0),
                 is_dm: None,
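
`.or_else(|| name)` passes a closure that does no work beyond returning an already-computed `Option`; clippy's `unnecessary_lazy_evaluations` suggests the eager `.or(name)` instead (laziness only pays off when the fallback is expensive to build). Sketch:

    fn main() {
        let fallback = Some(String::from("Fallback name"));
        let from_state: Option<String> = None;
        // Before: `from_state.or_else(|| fallback)`, where the closure adds nothing.
        let name = from_state.or(fallback);
        assert_eq!(name.as_deref(), Some("Fallback name"));
    }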
@@ -1637,7 +1628,7 @@ pub async fn sync_events_v4_route(
                     services()
                         .rooms
                         .user
-                        .highlight_count(&sender_user, &room_id)?
+                        .highlight_count(&sender_user, room_id)?
                         .try_into()
                         .expect("notification count can't go that high"),
                 ),
@@ -1645,7 +1636,7 @@ pub async fn sync_events_v4_route(
                     services()
                         .rooms
                         .user
-                        .notification_count(&sender_user, &room_id)?
+                        .notification_count(&sender_user, room_id)?
                         .try_into()
                         .expect("notification count can't go that high"),
                 ),
@@ -1658,7 +1649,7 @@ pub async fn sync_events_v4_route(
                 (services()
                     .rooms
                     .state_cache
-                    .room_joined_count(&room_id)?
+                    .room_joined_count(room_id)?
                     .unwrap_or(0) as u32)
                     .into(),
             ),
@@ -1666,7 +1657,7 @@ pub async fn sync_events_v4_route(
                 (services()
                     .rooms
                     .state_cache
-                    .room_invited_count(&room_id)?
+                    .room_invited_count(room_id)?
                     .unwrap_or(0) as u32)
                     .into(),
             ),
@@ -1689,7 +1680,7 @@ pub async fn sync_events_v4_route(
         let _ = tokio::time::timeout(duration, watcher).await;
     }

-    Ok(dbg!(sync_events::v4::Response {
+    Ok(sync_events::v4::Response {
         initial: globalsince == 0,
         txn_id: body.txn_id.clone(),
         pos: next_batch.to_string(),
@@ -1744,5 +1735,5 @@ pub async fn sync_events_v4_route(
             },
         },
         delta_token: None,
-    }))
+    })
 }