conduwuit/src/database/key_value/rooms/timeline.rs

368 lines
12 KiB
Rust
Raw Normal View History

use std::{collections::hash_map, mem::size_of, sync::Arc};
2022-10-05 20:34:31 +02:00
use ruma::{
2022-10-09 17:25:06 +02:00
api::client::error::ErrorKind, CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId,
2022-10-05 20:34:31 +02:00
};
use tracing::error;
2022-10-05 20:34:31 +02:00
use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result};
2023-02-20 22:59:45 +01:00
use service::rooms::timeline::PduCount;
2022-10-05 18:36:12 +02:00
/// Key-value backend implementation of the timeline storage interface.
///
/// Pdu ids are byte strings beginning with the room's shortroomid followed by
/// a big-endian count; backfilled events carry an extra `0_u64` marker between
/// the two so they sort before all normal timeline events (see `pdu_count` /
/// `count_to_id` below).
impl service::rooms::timeline::Data for KeyValueDatabase {
    /// Returns the count of the most recent timeline pdu in `room_id`,
    /// consulting (and on a miss, filling) `lasttimelinecount_cache`.
    fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result<PduCount> {
        match self
            .lasttimelinecount_cache
            .lock()
            .unwrap()
            .entry(room_id.to_owned())
        {
            // Cache miss: walk the timeline backwards from the end and take
            // the first pdu that deserializes correctly.
            hash_map::Entry::Vacant(v) => {
                if let Some(last_count) = self
                    .pdus_until(sender_user, room_id, PduCount::max())?
                    .find_map(|r| {
                        // Filter out buggy events
                        // NOTE(review): message says "pdus_since" but this
                        // iterates pdus_until — log text looks stale.
                        if r.is_err() {
                            error!("Bad pdu in pdus_since: {:?}", r);
                        }
                        r.ok()
                    })
                {
                    Ok(*v.insert(last_count.0))
                } else {
                    // Empty (or fully corrupted) timeline.
                    Ok(PduCount::Normal(0))
                }
            }
            hash_map::Entry::Occupied(o) => Ok(*o.get()),
        }
    }

    /// Returns the `count` of this pdu's id.
    fn get_pdu_count(&self, event_id: &EventId) -> Result<Option<PduCount>> {
        Ok(self
            .eventid_pduid
            .get(event_id.as_bytes())?
            .map(|pdu_id| pdu_count(&pdu_id))
            .transpose()?)
    }

    /// Returns the json of a pdu.
    ///
    /// Falls back to the `eventid_outlierpdu` tree when the event is not in
    /// the timeline.
    fn get_pdu_json(&self, event_id: &EventId) -> Result<Option<CanonicalJsonObject>> {
        self.get_non_outlier_pdu_json(event_id)?.map_or_else(
            || {
                self.eventid_outlierpdu
                    .get(event_id.as_bytes())?
                    .map(|pdu| {
                        serde_json::from_slice(&pdu)
                            .map_err(|_| Error::bad_database("Invalid PDU in db."))
                    })
                    .transpose()
            },
            |x| Ok(Some(x)),
        )
    }

    /// Returns the json of a pdu, looking only at the timeline
    /// (`eventid_pduid` -> `pduid_pdu`); outliers are not consulted.
    fn get_non_outlier_pdu_json(&self, event_id: &EventId) -> Result<Option<CanonicalJsonObject>> {
        self.eventid_pduid
            .get(event_id.as_bytes())?
            .map(|pduid| {
                self.pduid_pdu
                    .get(&pduid)?
                    .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid."))
            })
            .transpose()?
            .map(|pdu| {
                serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db."))
            })
            .transpose()
    }

    /// Returns the pdu's id.
    fn get_pdu_id(&self, event_id: &EventId) -> Result<Option<Vec<u8>>> {
        Ok(self.eventid_pduid.get(event_id.as_bytes())?)
    }

    /// Returns the pdu from the timeline only.
    ///
    /// This does __NOT__ check the outliers `Tree`.
    fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result<Option<PduEvent>> {
        self.eventid_pduid
            .get(event_id.as_bytes())?
            .map(|pduid| {
                self.pduid_pdu
                    .get(&pduid)?
                    .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid."))
            })
            .transpose()?
            .map(|pdu| {
                serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db."))
            })
            .transpose()
    }

    /// Returns the pdu.
    ///
    /// Checks the `eventid_outlierpdu` Tree if not found in the timeline.
    /// Deserialized pdus are cached in `pdu_cache`.
    fn get_pdu(&self, event_id: &EventId) -> Result<Option<Arc<PduEvent>>> {
        // Fast path: already cached.
        if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) {
            return Ok(Some(Arc::clone(p)));
        }

        if let Some(pdu) = self
            .get_non_outlier_pdu(event_id)?
            .map_or_else(
                // Not in the timeline — try the outlier tree.
                || {
                    self.eventid_outlierpdu
                        .get(event_id.as_bytes())?
                        .map(|pdu| {
                            serde_json::from_slice(&pdu)
                                .map_err(|_| Error::bad_database("Invalid PDU in db."))
                        })
                        .transpose()
                },
                |x| Ok(Some(x)),
            )?
            .map(Arc::new)
        {
            self.pdu_cache
                .lock()
                .unwrap()
                .insert(event_id.to_owned(), Arc::clone(&pdu));
            Ok(Some(pdu))
        } else {
            Ok(None)
        }
    }

    /// Returns the pdu.
    ///
    /// This does __NOT__ check the outliers `Tree`.
    fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result<Option<PduEvent>> {
        self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| {
            Ok(Some(
                serde_json::from_slice(&pdu)
                    .map_err(|_| Error::bad_database("Invalid PDU in db."))?,
            ))
        })
    }

    /// Returns the pdu as a `BTreeMap<String, CanonicalJsonValue>`.
    fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result<Option<CanonicalJsonObject>> {
        self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| {
            Ok(Some(
                serde_json::from_slice(&pdu)
                    .map_err(|_| Error::bad_database("Invalid PDU in db."))?,
            ))
        })
    }

    /// Stores a new timeline pdu under `pdu_id`, updates the
    /// last-timeline-count cache for the room, and drops any outlier copy
    /// of the event.
    fn append_pdu(
        &self,
        pdu_id: &[u8],
        pdu: &PduEvent,
        json: &CanonicalJsonObject,
        count: u64,
    ) -> Result<()> {
        self.pduid_pdu.insert(
            pdu_id,
            &serde_json::to_vec(json).expect("CanonicalJsonObject is always a valid"),
        )?;

        // Keep the cache coherent with the newly appended event.
        self.lasttimelinecount_cache
            .lock()
            .unwrap()
            .insert(pdu.room_id.clone(), PduCount::Normal(count));

        self.eventid_pduid.insert(pdu.event_id.as_bytes(), pdu_id)?;
        // The event is now part of the timeline, so it is no longer an outlier.
        self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?;

        Ok(())
    }

    /// Stores a backfilled pdu under `pdu_id` (backfill ids sort before the
    /// normal timeline) and drops any outlier copy of the event.
    fn prepend_backfill_pdu(
        &self,
        pdu_id: &[u8],
        event_id: &EventId,
        json: &CanonicalJsonObject,
    ) -> Result<()> {
        self.pduid_pdu.insert(
            pdu_id,
            &serde_json::to_vec(json).expect("CanonicalJsonObject is always a valid"),
        )?;

        self.eventid_pduid.insert(event_id.as_bytes(), pdu_id)?;
        self.eventid_outlierpdu.remove(event_id.as_bytes())?;

        Ok(())
    }

    /// Removes a pdu and creates a new one with the same id.
    ///
    /// Errors with `ErrorKind::NotFound` when no pdu exists under `pdu_id`.
    fn replace_pdu(
        &self,
        pdu_id: &[u8],
        pdu_json: &CanonicalJsonObject,
        pdu: &PduEvent,
    ) -> Result<()> {
        if self.pduid_pdu.get(pdu_id)?.is_some() {
            self.pduid_pdu.insert(
                pdu_id,
                &serde_json::to_vec(pdu_json).expect("CanonicalJsonObject is always a valid"),
            )?;
        } else {
            return Err(Error::BadRequest(
                ErrorKind::NotFound,
                "PDU does not exist.",
            ));
        }

        // Invalidate the cached copy so readers see the replacement.
        self.pdu_cache
            .lock()
            .unwrap()
            .remove(&(*pdu.event_id).to_owned());

        Ok(())
    }

    /// Returns an iterator over all events and their tokens in a room that happened before the
    /// event with id `until` in reverse-chronological order.
    fn pdus_until<'a>(
        &'a self,
        user_id: &UserId,
        room_id: &RoomId,
        until: PduCount,
    ) -> Result<Box<dyn Iterator<Item = Result<(PduCount, PduEvent)>> + 'a>> {
        // Subtract 1 from `until` so the base event itself is excluded.
        let (prefix, current) = count_to_id(&room_id, until, 1, true)?;

        let user_id = user_id.to_owned();

        Ok(Box::new(
            self.pduid_pdu
                .iter_from(&current, true)
                // Stop as soon as we leave this room's key range.
                .take_while(move |(k, _)| k.starts_with(&prefix))
                .map(move |(pdu_id, v)| {
                    let mut pdu = serde_json::from_slice::<PduEvent>(&v)
                        .map_err(|_| Error::bad_database("PDU in db is invalid."))?;
                    // Transaction ids are private to their sender.
                    if pdu.sender != user_id {
                        pdu.remove_transaction_id()?;
                    }
                    pdu.add_age()?;
                    let count = pdu_count(&pdu_id)?;
                    Ok((count, pdu))
                }),
        ))
    }

    /// Returns an iterator over all events and their tokens in a room that
    /// happened after the event with count `from`, in chronological order.
    fn pdus_after<'a>(
        &'a self,
        user_id: &UserId,
        room_id: &RoomId,
        from: PduCount,
    ) -> Result<Box<dyn Iterator<Item = Result<(PduCount, PduEvent)>> + 'a>> {
        // Add 1 to `from` so the base event itself is excluded.
        let (prefix, current) = count_to_id(&room_id, from, 1, false)?;

        let user_id = user_id.to_owned();

        Ok(Box::new(
            self.pduid_pdu
                .iter_from(&current, false)
                // Stop as soon as we leave this room's key range.
                .take_while(move |(k, _)| k.starts_with(&prefix))
                .map(move |(pdu_id, v)| {
                    let mut pdu = serde_json::from_slice::<PduEvent>(&v)
                        .map_err(|_| Error::bad_database("PDU in db is invalid."))?;
                    // Transaction ids are private to their sender.
                    if pdu.sender != user_id {
                        pdu.remove_transaction_id()?;
                    }
                    pdu.add_age()?;
                    let count = pdu_count(&pdu_id)?;
                    Ok((count, pdu))
                }),
        ))
    }

    /// Batch-increments the notification and highlight counters
    /// (`userroomid_notificationcount` / `userroomid_highlightcount`)
    /// for the given users in `room_id`.
    fn increment_notification_counts(
        &self,
        room_id: &RoomId,
        notifies: Vec<OwnedUserId>,
        highlights: Vec<OwnedUserId>,
    ) -> Result<()> {
        let mut notifies_batch = Vec::new();
        let mut highlights_batch = Vec::new();
        for user in notifies {
            // Key layout: user_id ++ 0xff ++ room_id
            let mut userroom_id = user.as_bytes().to_vec();
            userroom_id.push(0xff);
            userroom_id.extend_from_slice(room_id.as_bytes());
            notifies_batch.push(userroom_id);
        }
        for user in highlights {
            let mut userroom_id = user.as_bytes().to_vec();
            userroom_id.push(0xff);
            userroom_id.extend_from_slice(room_id.as_bytes());
            highlights_batch.push(userroom_id);
        }

        self.userroomid_notificationcount
            .increment_batch(&mut notifies_batch.into_iter())?;
        self.userroomid_highlightcount
            .increment_batch(&mut highlights_batch.into_iter())?;
        Ok(())
    }
}
2023-02-20 22:59:45 +01:00
/// Decodes the `PduCount` stored in the trailing bytes of a pdu id.
///
/// The last 8 bytes hold the count; if the 8 bytes before them decode to
/// zero, the id belongs to a backfilled event and the stored value is the
/// complement `u64::MAX - count`.
fn pdu_count(pdu_id: &[u8]) -> Result<PduCount> {
    let len = pdu_id.len();
    let width = size_of::<u64>();

    let last_u64 = utils::u64_from_bytes(&pdu_id[len - width..])
        .map_err(|_| Error::bad_database("PDU has invalid count bytes."))?;

    // A zero marker in the second-to-last u64 flags a backfilled id; a
    // decode failure here simply means "not backfilled".
    let backfilled = utils::u64_from_bytes(&pdu_id[len - 2 * width..len - width])
        .map_or(false, |marker| marker == 0);

    if backfilled {
        Ok(PduCount::Backfilled(u64::MAX - last_u64))
    } else {
        Ok(PduCount::Normal(last_u64))
    }
}
/// Builds the `(prefix, pdu_id)` pair used to seed a timeline iteration.
///
/// `prefix` is the room's shortroomid as big-endian bytes; `pdu_id` is that
/// prefix followed by `count` shifted by `offset` (subtracted for reverse /
/// `pdus_until` iteration, added for forward / `pdus_after`) so the base
/// event itself is excluded. Backfilled counts additionally encode a `0_u64`
/// marker before the count so they sort before all normal events.
///
/// Errors when the shortroomid lookup fails; panics if the room has no
/// shortroomid (callers must pass an existing room).
fn count_to_id(
    room_id: &RoomId,
    count: PduCount,
    offset: u64,
    subtract: bool,
) -> Result<(Vec<u8>, Vec<u8>)> {
    let prefix = services()
        .rooms
        .short
        .get_shortroomid(room_id)?
        .expect("room exists")
        .to_be_bytes()
        .to_vec();
    let mut pdu_id = prefix.clone();
    // +1 so we don't send the base event
    let count_raw = match count {
        PduCount::Normal(x) => {
            if subtract {
                // Saturate instead of underflowing (debug panic / release
                // wraparound to a bogus huge count) when x < offset, e.g.
                // PduCount::Normal(0) for an empty timeline.
                x.saturating_sub(offset)
            } else {
                x.saturating_add(offset)
            }
        }
        PduCount::Backfilled(x) => {
            // Backfilled pdu ids carry a 0_u64 marker before the count.
            pdu_id.extend_from_slice(&0_u64.to_be_bytes());
            let num = u64::MAX - x;
            if subtract {
                // Saturating also covers 0 < num < offset, which the old
                // `num > 0` guard missed.
                num.saturating_sub(offset)
            } else {
                num.saturating_add(offset)
            }
        }
    };
    pdu_id.extend_from_slice(&count_raw.to_be_bytes());

    Ok((prefix, pdu_id))
}