improvement: make more things async
parent 9b898248c7
commit 0bc03e90a1
7 changed files with 244 additions and 155 deletions
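The diff below applies one pattern throughout: routines that walk large event graphs (get_auth_chain, the prev-event loop in handle_incoming_pdu, fetch_and_handle_outliers) become async, yield back to the scheduler every 100 iterations, and their callers gain .await. A minimal sketch of that pattern follows; Db, EventId and load_auth_events are placeholder names for illustration, not Conduit's real APIs.

    use std::collections::HashSet;

    struct Db;
    type EventId = String;

    impl Db {
        // Stand-in for a real database lookup of an event's auth events.
        fn load_auth_events(&self, _id: &EventId) -> Vec<EventId> {
            Vec::new()
        }
    }

    // Before: a plain `fn` that ran to completion without ever yielding,
    // keeping the executor thread busy for the whole traversal.
    // After: the same work, made async and cooperative.
    async fn get_auth_chain(db: &Db, starting_events: Vec<EventId>) -> HashSet<EventId> {
        let mut todo = starting_events;
        let mut seen = HashSet::new();
        let mut i: u64 = 0;

        while let Some(id) = todo.pop() {
            if !seen.insert(id.clone()) {
                continue;
            }
            todo.extend(db.load_auth_events(&id));

            // Hand control back to the tokio scheduler periodically so other
            // tasks (e.g. other incoming federation transactions) can progress.
            i += 1;
            if i % 100 == 0 {
                tokio::task::yield_now().await;
            }
        }
        seen
    }

    #[tokio::main]
    async fn main() {
        let chain = get_auth_chain(&Db, vec!["$event:example.org".to_owned()]).await;
        println!("auth chain size: {}", chain.len());
    }

Because the traversal functions become async, every caller has to await them, which is where most of the smaller hunks in this diff come from.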
@@ -691,7 +691,7 @@ pub async fn send_transaction_message_route(
                     .roomid_mutex_federation
                     .write()
                     .unwrap()
-                    .entry(room_id.clone())
+                    .entry(room_id.to_owned())
                     .or_default(),
             );
             let mutex_lock = mutex.lock().await;
@@ -1054,6 +1054,25 @@ pub(crate) async fn handle_incoming_pdu<'a>(
             }
         }

+        if let Some((time, tries)) = db
+            .globals
+            .bad_event_ratelimiter
+            .read()
+            .unwrap()
+            .get(&*prev_id)
+        {
+            // Exponential backoff
+            let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries);
+            if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
+                min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
+            }
+
+            if time.elapsed() < min_elapsed_duration {
+                info!("Backing off from {}", prev_id);
+                continue;
+            }
+        }
+
         if errors >= 5 {
             break;
         }
@@ -1068,7 +1087,6 @@ pub(crate) async fn handle_incoming_pdu<'a>(
                 .write()
                 .unwrap()
                 .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time));
-            let event_id = pdu.event_id.clone();
             if let Err(e) = upgrade_outlier_to_timeline_pdu(
                 pdu,
                 json,
@@ -1081,7 +1099,21 @@ pub(crate) async fn handle_incoming_pdu<'a>(
             .await
             {
                 errors += 1;
-                warn!("Prev event {} failed: {}", event_id, e);
+                warn!("Prev event {} failed: {}", prev_id, e);
+                match db
+                    .globals
+                    .bad_event_ratelimiter
+                    .write()
+                    .unwrap()
+                    .entry((*prev_id).to_owned())
+                {
+                    hash_map::Entry::Vacant(e) => {
+                        e.insert((Instant::now(), 1));
+                    }
+                    hash_map::Entry::Occupied(mut e) => {
+                        *e.get_mut() = (Instant::now(), e.get().1 + 1)
+                    }
+                }
             }
             let elapsed = start_time.elapsed();
             db.globals
@@ -1091,7 +1123,7 @@ pub(crate) async fn handle_incoming_pdu<'a>(
                 .remove(&room_id.to_owned());
             warn!(
                 "Handling prev event {} took {}m{}s",
-                event_id,
+                prev_id,
                 elapsed.as_secs() / 60,
                 elapsed.as_secs() % 60
             );
@@ -1321,8 +1353,11 @@ async fn upgrade_outlier_to_timeline_pdu(
             .pdu_shortstatehash(prev_event)
             .map_err(|_| "Failed talking to db".to_owned())?;

-        let state =
-            prev_event_sstatehash.map(|shortstatehash| db.rooms.state_full_ids(shortstatehash));
+        let state = if let Some(shortstatehash) = prev_event_sstatehash {
+            Some(db.rooms.state_full_ids(shortstatehash).await)
+        } else {
+            None
+        };

         if let Some(Ok(mut state)) = state {
             info!("Using cached state");
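The map-to-if-let rewrite in the hunk above is forced by the language rather than by style: once state_full_ids is async, `.await` can no longer appear inside the closure passed to Option::map, because a plain closure is not an async context. A small stand-alone illustration with a hypothetical, simplified state_full_ids signature (not the real Conduit one):

    // Hypothetical async stand-in for db.rooms.state_full_ids.
    async fn state_full_ids(shortstatehash: u64) -> Result<Vec<u64>, String> {
        Ok(vec![shortstatehash])
    }

    async fn state_for(prev_event_sstatehash: Option<u64>) -> Option<Result<Vec<u64>, String>> {
        // Does NOT compile: `.await` inside a plain (non-async) closure.
        // prev_event_sstatehash.map(|s| state_full_ids(s).await)

        // The shape the commit uses instead:
        if let Some(s) = prev_event_sstatehash {
            Some(state_full_ids(s).await)
        } else {
            None
        }
    }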
@@ -1378,6 +1413,7 @@ async fn upgrade_outlier_to_timeline_pdu(
                 let mut leaf_state: BTreeMap<_, _> = db
                     .rooms
                     .state_full_ids(sstatehash)
+                    .await
                     .map_err(|_| "Failed to ask db for room state.".to_owned())?;

                 if let Some(state_key) = &prev_event.state_key {
@@ -1409,6 +1445,7 @@ async fn upgrade_outlier_to_timeline_pdu(

                 auth_chain_sets.push(
                     get_auth_chain(room_id, starting_events, db)
+                        .await
                         .map_err(|_| "Failed to load auth chain.".to_owned())?
                         .collect(),
                 );
@@ -1535,6 +1572,7 @@ async fn upgrade_outlier_to_timeline_pdu(
     let state_at_incoming_event =
         state_at_incoming_event.expect("we always set this to some above");

+    info!("Starting auth check");
     // 11. Check the auth of the event passes based on the state of the event
     let check_result = state_res::event_auth::auth_check(
         &room_version,
@@ -1554,7 +1592,7 @@ async fn upgrade_outlier_to_timeline_pdu(
     if !check_result {
         return Err("Event has failed auth check with state at the event.".into());
     }
-    info!("Auth check succeeded.");
+    info!("Auth check succeeded");

     // We start looking at current room state now, so lets lock the room
@@ -1570,6 +1608,7 @@ async fn upgrade_outlier_to_timeline_pdu(

     // Now we calculate the set of extremities this room has after the incoming event has been
     // applied. We start with the previous extremities (aka leaves)
+    info!("Calculating extremities");
     let mut extremities = db
         .rooms
         .get_pdu_leaves(room_id)
@@ -1585,28 +1624,7 @@ async fn upgrade_outlier_to_timeline_pdu(
     // Only keep those extremities were not referenced yet
     extremities.retain(|id| !matches!(db.rooms.is_event_referenced(room_id, id), Ok(true)));

-    let current_sstatehash = db
-        .rooms
-        .current_shortstatehash(room_id)
-        .map_err(|_| "Failed to load current state hash.".to_owned())?
-        .expect("every room has state");
-
-    let current_state_ids = db
-        .rooms
-        .state_full_ids(current_sstatehash)
-        .map_err(|_| "Failed to load room state.")?;
-
-    let auth_events = db
-        .rooms
-        .get_auth_events(
-            room_id,
-            &incoming_pdu.kind,
-            &incoming_pdu.sender,
-            incoming_pdu.state_key.as_deref(),
-            &incoming_pdu.content,
-        )
-        .map_err(|_| "Failed to get_auth_events.".to_owned())?;
-
+    info!("Compressing state at event");
     let state_ids_compressed = state_at_incoming_event
         .iter()
         .map(|(shortstatekey, id)| {
@@ -1619,6 +1637,17 @@ async fn upgrade_outlier_to_timeline_pdu(
     // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it
     info!("Starting soft fail auth check");

+    let auth_events = db
+        .rooms
+        .get_auth_events(
+            room_id,
+            &incoming_pdu.kind,
+            &incoming_pdu.sender,
+            incoming_pdu.state_key.as_deref(),
+            &incoming_pdu.content,
+        )
+        .map_err(|_| "Failed to get_auth_events.".to_owned())?;
+
     let soft_fail = !state_res::event_auth::auth_check(
         &room_version,
         &incoming_pdu,
@@ -1651,6 +1680,19 @@ async fn upgrade_outlier_to_timeline_pdu(
     }

     if incoming_pdu.state_key.is_some() {
+        info!("Loading current room state ids");
+        let current_sstatehash = db
+            .rooms
+            .current_shortstatehash(room_id)
+            .map_err(|_| "Failed to load current state hash.".to_owned())?
+            .expect("every room has state");
+
+        let current_state_ids = db
+            .rooms
+            .state_full_ids(current_sstatehash)
+            .await
+            .map_err(|_| "Failed to load room state.")?;
+
         info!("Preparing for stateres to derive new room state");
         let mut extremity_sstatehashes = HashMap::new();
@@ -1738,6 +1780,7 @@ async fn upgrade_outlier_to_timeline_pdu(
                     state.iter().map(|(_, id)| id.clone()).collect(),
                     db,
                 )
+                .await
                 .map_err(|_| "Failed to load auth chain.".to_owned())?
                 .collect(),
             );
@@ -1899,11 +1942,17 @@ pub(crate) fn fetch_and_handle_outliers<'a>(
                 let mut todo_auth_events = vec![Arc::clone(id)];
                 let mut events_in_reverse_order = Vec::new();
                 let mut events_all = HashSet::new();
+                let mut i = 0;
                 while let Some(next_id) = todo_auth_events.pop() {
                     if events_all.contains(&next_id) {
                         continue;
                     }

+                    i += 1;
+                    if i % 100 == 0 {
+                        tokio::task::yield_now().await;
+                    }
+
                     if let Ok(Some(_)) = db.rooms.get_pdu(&next_id) {
                         trace!("Found {} in db", id);
                         continue;
@@ -2242,7 +2291,7 @@ fn append_incoming_pdu<'a>(
 }

 #[tracing::instrument(skip(starting_events, db))]
-pub(crate) fn get_auth_chain<'a>(
+pub(crate) async fn get_auth_chain<'a>(
     room_id: &RoomId,
     starting_events: Vec<Arc<EventId>>,
     db: &'a Database,
@@ -2251,10 +2300,15 @@ pub(crate) fn get_auth_chain<'a>(

     let mut buckets = vec![BTreeSet::new(); NUM_BUCKETS];

+    let mut i = 0;
     for id in starting_events {
         let short = db.rooms.get_or_create_shorteventid(&id, &db.globals)?;
         let bucket_id = (short % NUM_BUCKETS as u64) as usize;
         buckets[bucket_id].insert((short, id.clone()));
+        i += 1;
+        if i % 100 == 0 {
+            tokio::task::yield_now().await;
+        }
     }

     let mut full_auth_chain = HashSet::new();
@@ -2277,6 +2331,7 @@ pub(crate) fn get_auth_chain<'a>(
         let mut chunk_cache = HashSet::new();
         let mut hits2 = 0;
         let mut misses2 = 0;
+        let mut i = 0;
         for (sevent_id, event_id) in chunk {
             if let Some(cached) = db.rooms.get_auth_chain_from_cache(&[sevent_id])? {
                 hits2 += 1;
@@ -2292,6 +2347,11 @@ pub(crate) fn get_auth_chain<'a>(
                     auth_chain.len()
                 );
                 chunk_cache.extend(auth_chain.iter());
+
+                i += 1;
+                if i % 100 == 0 {
+                    tokio::task::yield_now().await;
+                }
             };
         }
         println!(
@@ -2512,7 +2572,7 @@ pub async fn get_event_authorization_route(
     let room_id = <&RoomId>::try_from(room_id_str)
         .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?;

-    let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)], &db)?;
+    let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)], &db).await?;

     Ok(get_event_authorization::v1::Response {
         auth_chain: auth_chain_ids
@@ -2557,7 +2617,8 @@ pub async fn get_room_state_route(

     let pdus = db
         .rooms
-        .state_full_ids(shortstatehash)?
+        .state_full_ids(shortstatehash)
+        .await?
         .into_iter()
         .map(|(_, id)| {
             PduEvent::convert_to_outgoing_federation_event(
@@ -2566,7 +2627,8 @@ pub async fn get_room_state_route(
         })
         .collect();

-    let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db)?;
+    let auth_chain_ids =
+        get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db).await?;

     Ok(get_room_state::v1::Response {
         auth_chain: auth_chain_ids
@@ -2616,12 +2678,14 @@ pub async fn get_room_state_ids_route(

     let pdu_ids = db
         .rooms
-        .state_full_ids(shortstatehash)?
+        .state_full_ids(shortstatehash)
+        .await?
         .into_iter()
         .map(|(_, id)| (*id).to_owned())
         .collect();

-    let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db)?;
+    let auth_chain_ids =
+        get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db).await?;

     Ok(get_room_state_ids::v1::Response {
         auth_chain_ids: auth_chain_ids.map(|id| (*id).to_owned()).collect(),
@@ -2927,12 +2991,13 @@ async fn create_join_event(
     ))?;
     drop(mutex_lock);

-    let state_ids = db.rooms.state_full_ids(shortstatehash)?;
+    let state_ids = db.rooms.state_full_ids(shortstatehash).await?;
     let auth_chain_ids = get_auth_chain(
         room_id,
         state_ids.iter().map(|(_, id)| id.clone()).collect(),
         db,
-    )?;
+    )
+    .await?;

     let servers = db
         .rooms