diff --git a/.github/workflows/integration_test.yml b/.github/workflows/integration_test.yml index 2e31d35ee..879999007 100644 --- a/.github/workflows/integration_test.yml +++ b/.github/workflows/integration_test.yml @@ -56,6 +56,13 @@ jobs: sed -i '/image: appflowyinc\/admin_frontend:/d' docker-compose.yml cat docker-compose.yml + - uses: actions/cache@v2 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildx- + - name: Run Docker-Compose run: | docker compose up -d diff --git a/Cargo.lock b/Cargo.lock index 99007ca0f..9f9220d0b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1388,7 +1388,7 @@ dependencies = [ [[package]] name = "collab" version = "0.1.0" -source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=d4c4ed1ed1eb115f3d578bf7cf65b5a769c18b0f#d4c4ed1ed1eb115f3d578bf7cf65b5a769c18b0f" +source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=46911ccbf321f7ad19b0e547e38edfd9fece48a9#46911ccbf321f7ad19b0e547e38edfd9fece48a9" dependencies = [ "anyhow", "async-trait", @@ -1412,7 +1412,7 @@ dependencies = [ [[package]] name = "collab-document" version = "0.1.0" -source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=d4c4ed1ed1eb115f3d578bf7cf65b5a769c18b0f#d4c4ed1ed1eb115f3d578bf7cf65b5a769c18b0f" +source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=46911ccbf321f7ad19b0e547e38edfd9fece48a9#46911ccbf321f7ad19b0e547e38edfd9fece48a9" dependencies = [ "anyhow", "collab", @@ -1431,7 +1431,7 @@ dependencies = [ [[package]] name = "collab-entity" version = "0.1.0" -source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=d4c4ed1ed1eb115f3d578bf7cf65b5a769c18b0f#d4c4ed1ed1eb115f3d578bf7cf65b5a769c18b0f" +source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=46911ccbf321f7ad19b0e547e38edfd9fece48a9#46911ccbf321f7ad19b0e547e38edfd9fece48a9" dependencies = [ "anyhow", "bytes", @@ -1446,7 +1446,7 @@ dependencies = [ [[package]] name = "collab-folder" version = "0.1.0" -source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=d4c4ed1ed1eb115f3d578bf7cf65b5a769c18b0f#d4c4ed1ed1eb115f3d578bf7cf65b5a769c18b0f" +source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=46911ccbf321f7ad19b0e547e38edfd9fece48a9#46911ccbf321f7ad19b0e547e38edfd9fece48a9" dependencies = [ "anyhow", "chrono", diff --git a/Cargo.toml b/Cargo.toml index ce4063c77..6e2a445c7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -182,10 +182,10 @@ inherits = "release" debug = true [patch.crates-io] -collab = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "d4c4ed1ed1eb115f3d578bf7cf65b5a769c18b0f" } -collab-entity = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "d4c4ed1ed1eb115f3d578bf7cf65b5a769c18b0f" } -collab-folder = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "d4c4ed1ed1eb115f3d578bf7cf65b5a769c18b0f" } -collab-document = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "d4c4ed1ed1eb115f3d578bf7cf65b5a769c18b0f" } +collab = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "46911ccbf321f7ad19b0e547e38edfd9fece48a9" } +collab-entity = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "46911ccbf321f7ad19b0e547e38edfd9fece48a9" } +collab-folder = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "46911ccbf321f7ad19b0e547e38edfd9fece48a9" } +collab-document = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "46911ccbf321f7ad19b0e547e38edfd9fece48a9" } [features] 
custom_env= [] diff --git a/libs/client-api-test-util/src/test_client.rs b/libs/client-api-test-util/src/test_client.rs index 7ce3e510b..1e2c4527b 100644 --- a/libs/client-api-test-util/src/test_client.rs +++ b/libs/client-api-test-util/src/test_client.rs @@ -806,7 +806,7 @@ pub async fn assert_client_collab_include_value( object_id: &str, expected: Value, ) -> Result<(), Error> { - let secs = 30; + let secs = 60; let object_id = object_id.to_string(); let mut retry_count = 0; loop { diff --git a/libs/client-api/src/collab_sync/sink.rs b/libs/client-api/src/collab_sync/collab_sink.rs similarity index 74% rename from libs/client-api/src/collab_sync/sink.rs rename to libs/client-api/src/collab_sync/collab_sink.rs index cb838f05d..b785ac220 100644 --- a/libs/client-api/src/collab_sync/sink.rs +++ b/libs/client-api/src/collab_sync/collab_sink.rs @@ -1,22 +1,22 @@ use crate::af_spawn; -use crate::collab_sync::sink_config::SinkConfig; +use crate::collab_sync::collab_stream::{check_update_contiguous, SeqNumCounter}; +use crate::collab_sync::ping::PingSyncRunner; use crate::collab_sync::sink_queue::{QueueItem, SinkQueue}; -use crate::collab_sync::{check_update_contiguous, SyncError, SyncObject}; +use crate::collab_sync::{SinkConfig, SyncError, SyncObject}; +use collab::core::origin::{CollabClient, CollabOrigin}; +use collab_rt_entity::{ClientCollabMessage, MsgId, ServerCollabMessage, SinkMessage}; use futures_util::SinkExt; - -use collab_rt_entity::{MsgId, ServerCollabMessage, SinkMessage}; use std::collections::{HashMap, HashSet}; -use std::marker::PhantomData; -use std::sync::atomic::{AtomicBool, AtomicU32, AtomicU64, Ordering}; +use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; use std::sync::{Arc, Weak}; use std::time::{Duration, Instant}; -use tokio::sync::{watch, Mutex}; + +use tokio::sync::{broadcast, watch, Mutex}; use tokio::time::{interval, sleep}; use tracing::{error, trace, warn}; #[derive(Clone, Debug)] pub enum SinkState { - Init, /// The sink is syncing the messages to the remote. Syncing, /// All the messages are synced to the remote. @@ -25,10 +25,6 @@ pub enum SinkState { } impl SinkState { - pub fn is_init(&self) -> bool { - matches!(self, SinkState::Init) - } - pub fn is_syncing(&self) -> bool { matches!(self, SinkState::Syncing) } @@ -41,12 +37,11 @@ pub enum SinkSignal { ProcessAfterMillis(u64), } -const SEND_INTERVAL: Duration = Duration::from_secs(8); +pub(crate) const SEND_INTERVAL: Duration = Duration::from_secs(8); pub const COLLAB_SINK_DELAY_MILLIS: u64 = 500; -/// Use to sync the [Msg] to the remote. -pub struct CollabSink { +pub struct CollabSink { #[allow(dead_code)] uid: i64, /// The [Sink] is used to send the messages to the remote. It might be a websocket sink or @@ -54,66 +49,83 @@ pub struct CollabSink { sender: Arc>, /// The [SinkQueue] is used to queue the messages that are waiting to be sent to the /// remote. It will merge the messages if possible. - message_queue: Arc>>, + message_queue: Arc>>, msg_id_counter: Arc, /// The [watch::Sender] is used to notify the [CollabSinkRunner] to process the pending messages. /// Sending `false` will stop the [CollabSinkRunner]. 
notifier: Arc>, config: SinkConfig, - state_notifier: Arc>, + sync_state_tx: broadcast::Sender, pause: AtomicBool, object: SyncObject, flying_messages: Arc>>, - last_sync: Arc, + last_sync: Arc, + pause_ping: Arc, } -impl Drop for CollabSink { +impl Drop for CollabSink { fn drop(&mut self) { trace!("Drop CollabSink {}", self.object.object_id); let _ = self.notifier.send(SinkSignal::Stop); } } -impl CollabSink +impl CollabSink where E: Into + Send + Sync + 'static, - Sink: SinkExt, Error = E> + Send + Sync + Unpin + 'static, - Msg: SinkMessage, + Sink: SinkExt, Error = E> + Send + Sync + Unpin + 'static, { pub fn new( uid: i64, object: SyncObject, sink: Sink, notifier: watch::Sender, - sync_state_tx: watch::Sender, + sync_state_tx: broadcast::Sender, config: SinkConfig, pause: bool, ) -> Self { let msg_id_counter = DefaultMsgIdCounter::new(); let notifier = Arc::new(notifier); - let state_notifier = Arc::new(sync_state_tx); let sender = Arc::new(Mutex::new(sink)); - let msg_queue = SinkQueue::new(uid); + let msg_queue = SinkQueue::new(); let message_queue = Arc::new(parking_lot::Mutex::new(msg_queue)); let msg_id_counter = Arc::new(msg_id_counter); let flying_messages = Arc::new(parking_lot::Mutex::new(HashSet::new())); + let pause_ping = Arc::new(AtomicBool::new(false)); - let last_sync = Arc::new(LastSync::new()); + let last_sync = Arc::new(SyncTimestamp::new()); let mut interval = interval(SEND_INTERVAL); let weak_notifier = Arc::downgrade(¬ifier); let weak_flying_messages = Arc::downgrade(&flying_messages); + + let origin = CollabOrigin::Client(CollabClient { + uid, + device_id: object.device_id.clone(), + }); + + PingSyncRunner::run( + origin, + object.object_id.clone(), + msg_id_counter.clone(), + Arc::downgrade(&message_queue), + pause_ping.clone(), + weak_notifier, + last_sync.clone(), + ); + let cloned_last_sync = last_sync.clone(); + let weak_notifier = Arc::downgrade(¬ifier); af_spawn(async move { // Initial delay to make sure the first tick waits for SEND_INTERVAL sleep(SEND_INTERVAL).await; - loop { interval.tick().await; match weak_notifier.upgrade() { Some(notifier) => { // Removing the flying messages allows for the re-sending of the top k messages in the message queue. if let Some(flying_messages) = weak_flying_messages.upgrade() { - if cloned_last_sync.should_clear().await { + // remove all the flying messages if the last sync is expired within the SEND_INTERVAL. + if cloned_last_sync.is_time_for_next_sync(SEND_INTERVAL).await { flying_messages.lock().clear(); } } @@ -133,12 +145,13 @@ where message_queue, msg_id_counter, notifier, - state_notifier, + sync_state_tx, config, pause: AtomicBool::new(pause), object, flying_messages, last_sync, + pause_ping, } } @@ -147,17 +160,13 @@ where /// its priority. And the message priority is determined by the [Msg] that implement the [Ord] and /// [PartialOrd] trait. Check out the [CollabMessage] for more details. 
/// - pub fn queue_msg(&self, f: impl FnOnce(MsgId) -> Msg) { - if !self.state_notifier.borrow().is_syncing() { - let _ = self.state_notifier.send(SinkState::Syncing); - } + pub fn queue_msg(&self, f: impl FnOnce(MsgId) -> ClientCollabMessage) { + let _ = self.sync_state_tx.send(SinkState::Syncing); let mut msg_queue = self.message_queue.lock(); let msg_id = self.msg_id_counter.next(); let new_msg = f(msg_id); - trace!("🔥 queue {}", new_msg); msg_queue.push_msg(msg_id, new_msg); - // msg_queue.extend(requeue_items); drop(msg_queue); self.merge(); @@ -169,10 +178,8 @@ where /// When queue the init message, the sink will clear all the pending messages and send the init /// message immediately. - pub fn queue_init_sync(&self, f: impl FnOnce(MsgId) -> Msg) { - if !self.state_notifier.borrow().is_syncing() { - let _ = self.state_notifier.send(SinkState::Syncing); - } + pub fn queue_init_sync(&self, f: impl FnOnce(MsgId) -> ClientCollabMessage) { + let _ = self.sync_state_tx.send(SinkState::Syncing); // Clear all the pending messages and send the init message immediately. self.clear(); @@ -181,7 +188,6 @@ where let mut msg_queue = self.message_queue.lock(); let msg_id = self.msg_id_counter.next(); let init_sync = f(msg_id); - trace!("🔥queue {}", init_sync); msg_queue.push_msg(msg_id, init_sync); let _ = self.notifier.send(SinkSignal::Proceed); } @@ -212,11 +218,13 @@ where } pub fn pause(&self) { + self.pause_ping.store(true, Ordering::SeqCst); self.pause.store(true, Ordering::SeqCst); - let _ = self.state_notifier.send(SinkState::Pause); + let _ = self.sync_state_tx.send(SinkState::Pause); } pub fn resume(&self) { + self.pause_ping.store(false, Ordering::SeqCst); self.pause.store(false, Ordering::SeqCst); } @@ -226,7 +234,7 @@ where &self, msg_id: MsgId, server_message: &ServerCollabMessage, - seq_num_counter: &Arc, + seq_num_counter: &Arc, ) -> Result { // safety: msg_id is not None let income_message_id = msg_id; @@ -257,31 +265,37 @@ where if is_valid { if let ServerCollabMessage::ClientAck(ack) = server_message { - let prev_seq_num = seq_num_counter - .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| Some(ack.seq_num)) - .unwrap(); - // Check the seq_num is contiguous. 
- check_update_contiguous(&self.object.object_id, ack.seq_num, prev_seq_num)?; + check_update_contiguous(&self.object.object_id, ack.seq_num, seq_num_counter)?; } } - trace!( - "{:?}: pending count:{} ids:{}", - self.object.object_id, - message_queue.len(), - message_queue - .iter() - .map(|item| item.msg_id().to_string()) - .collect::>() - .join(",") - ); + // Check if all non-ping messages have been sent + let all_non_ping_messages_sent = !message_queue + .iter() + .any(|item| !item.message().is_ping_sync()); - if message_queue.is_empty() { - if let Err(e) = self.state_notifier.send(SinkState::Finished) { - error!("send sink state failed: {}", e); + // If there are no non-ping messages left in the queue, it indicates all messages have been sent + if all_non_ping_messages_sent { + if let Err(err) = self.sync_state_tx.send(SinkState::Finished) { + error!( + "Failed to send SinkState::Finished for object_id '{}': {}", + self.object.object_id, err + ); } + } else { + trace!( + "{}: pending count:{} ids:{}", + self.object.object_id, + message_queue.len(), + message_queue + .iter() + .map(|item| item.msg_id().to_string()) + .collect::>() + .join(",") + ); } + Ok(is_valid) } @@ -312,7 +326,7 @@ where self.send_immediately(items).await; } - async fn send_immediately(&self, items: Vec>) { + async fn send_immediately(&self, items: Vec>) { let message_ids = items.iter().map(|item| item.msg_id()).collect::>(); let messages = items .into_iter() @@ -320,11 +334,11 @@ where .collect::>(); match self.sender.try_lock() { Ok(mut sender) => { - self.last_sync.tick().await; + self.last_sync.update_timestamp().await; match sender.send(messages).await { Ok(_) => { trace!( - "🔥 sending {} messages {:?}", + "🔥client sending {} messages {:?}", self.object.object_id, message_ids ); @@ -350,15 +364,11 @@ where } fn merge(&self) { - if self.config.disable_merge_message { - return; - } - if let (Some(flying_messages), Some(mut msg_queue)) = ( self.flying_messages.try_lock(), self.message_queue.try_lock(), ) { - let mut items: Vec> = Vec::with_capacity(msg_queue.len()); + let mut items: Vec> = Vec::with_capacity(msg_queue.len()); let mut merged_ids = HashMap::new(); while let Some(next) = msg_queue.pop() { // If the message is in the flying messages, it means the message is sending to the remote. @@ -410,14 +420,11 @@ where } } -fn get_next_batch_item( - object_id: &str, +fn get_next_batch_item( + _object_id: &str, flying_messages: &mut HashSet, - msg_queue: &mut SinkQueue, -) -> Vec> -where - Msg: SinkMessage, -{ + msg_queue: &mut SinkQueue, +) -> Vec> { let mut next_sending_items = vec![]; let mut requeue_items = vec![]; while let Some(item) = msg_queue.pop() { @@ -427,11 +434,6 @@ where } if flying_messages.contains(&item.msg_id()) { - trace!( - "{} message:{} is syncing to server, stop sync more messages", - object_id, - item.msg_id() - ); // because the messages in msg_queue are ordered by priority, so if the message is in the // flying messages, it means the message is sending to the remote. So don't send the following // messages. 
@@ -448,18 +450,17 @@ where } } } - - if !requeue_items.is_empty() { - trace!( - "requeue {} messages: ids=>{}", - object_id, - requeue_items - .iter() - .map(|item| { item.msg_id().to_string() }) - .collect::>() - .join(",") - ); - } + // if !requeue_items.is_empty() { + // trace!( + // "requeue {} messages: ids=>{}", + // object_id, + // requeue_items + // .iter() + // .map(|item| { item.msg_id().to_string() }) + // .collect::>() + // .join(",") + // ); + // } msg_queue.extend(requeue_items); let message_ids = next_sending_items .iter() @@ -475,21 +476,17 @@ fn retry_later(weak_notifier: Weak>) { } } -pub struct CollabSinkRunner(PhantomData); +pub struct CollabSinkRunner; -impl CollabSinkRunner { +impl CollabSinkRunner { /// The runner will stop if the [CollabSink] was dropped or the notifier was closed. pub async fn run( - weak_sink: Weak>, + weak_sink: Weak>, mut notifier: watch::Receiver, ) where E: Into + Send + Sync + 'static, - Sink: SinkExt, Error = E> + Send + Sync + Unpin + 'static, - Msg: SinkMessage, + Sink: SinkExt, Error = E> + Send + Sync + Unpin + 'static, { - if let Some(sink) = weak_sink.upgrade() { - sink.notify(); - } loop { // stops the runner if the notifier was closed. if notifier.changed().await.is_err() { @@ -526,30 +523,31 @@ impl DefaultMsgIdCounter { pub fn new() -> Self { Self::default() } - fn next(&self) -> MsgId { + pub(crate) fn next(&self) -> MsgId { self.0.fetch_add(1, Ordering::SeqCst) } } -struct LastSync { +pub(crate) struct SyncTimestamp { last_sync: Mutex, } -impl LastSync { +impl SyncTimestamp { fn new() -> Self { - let now = std::time::Instant::now(); - LastSync { + let now = Instant::now(); + SyncTimestamp { last_sync: Mutex::new(now.checked_sub(Duration::from_secs(60)).unwrap_or(now)), } } - async fn should_clear(&self) -> bool { - let now = std::time::Instant::now(); - now.duration_since(*self.last_sync.lock().await) > SEND_INTERVAL + /// Indicate the duration is passed since the last sync. 
The last sync timestamp will be updated + /// after sending a new message + pub async fn is_time_for_next_sync(&self, duration: Duration) -> bool { + Instant::now().duration_since(*self.last_sync.lock().await) > duration } - async fn tick(&self) { + async fn update_timestamp(&self) { let mut last_sync_locked = self.last_sync.lock().await; - *last_sync_locked = std::time::Instant::now(); + *last_sync_locked = Instant::now(); } } diff --git a/libs/client-api/src/collab_sync/collab_stream.rs b/libs/client-api/src/collab_sync/collab_stream.rs new file mode 100644 index 000000000..16cede037 --- /dev/null +++ b/libs/client-api/src/collab_sync/collab_stream.rs @@ -0,0 +1,395 @@ +use crate::af_spawn; +use crate::collab_sync::{ + start_sync, CollabSink, SyncError, SyncObject, NUMBER_OF_UPDATE_TRIGGER_INIT_SYNC, +}; +use anyhow::anyhow; +use bytes::Bytes; +use collab::core::collab::MutexCollab; +use collab::core::origin::CollabOrigin; +use collab_rt_entity::{ + AckCode, BroadcastSync, ClientCollabMessage, ServerCollabMessage, ServerInit, UpdateSync, +}; +use collab_rt_protocol::{handle_message, ClientSyncProtocol, Message, MessageReader, SyncMessage}; +use futures_util::{SinkExt, StreamExt}; +use std::marker::PhantomData; +use std::sync::atomic::{AtomicU32, Ordering}; +use std::sync::{Arc, Weak}; +use std::time::{Duration, Instant}; +use tokio::sync::Mutex; +use tracing::{error, instrument, trace, warn}; +use yrs::encoding::read::Cursor; +use yrs::updates::decoder::DecoderV1; + +const DEBOUNCE_DURATION: Duration = Duration::from_secs(10); + +/// Use to continuously receive updates from remote. +pub struct ObserveCollab { + object_id: String, + #[allow(dead_code)] + weak_collab: Weak, + phantom_sink: PhantomData, + phantom_stream: PhantomData, + // Use sequence number to check if the received updates/broadcasts are continuous. + #[allow(dead_code)] + seq_num_counter: Arc, +} + +impl Drop for ObserveCollab { + fn drop(&mut self) { + trace!("Drop SyncStream {}", self.object_id); + } +} + +impl ObserveCollab +where + E: Into + Send + Sync + 'static, + Sink: SinkExt, Error = E> + Send + Sync + Unpin + 'static, + Stream: StreamExt> + Send + Sync + Unpin + 'static, +{ + pub fn new( + origin: CollabOrigin, + object: SyncObject, + stream: Stream, + weak_collab: Weak, + sink: Weak>, + ) -> Self { + let last_init_sync = LastSyncTime::new(); + let object_id = object.object_id.clone(); + let cloned_weak_collab = weak_collab.clone(); + let seq_num_counter = Arc::new(SeqNumCounter::default()); + let cloned_seq_num_counter = seq_num_counter.clone(); + af_spawn(ObserveCollab::::observer_collab_message( + origin, + object, + stream, + cloned_weak_collab, + sink, + cloned_seq_num_counter, + last_init_sync, + )); + Self { + object_id, + weak_collab, + phantom_sink: Default::default(), + phantom_stream: Default::default(), + seq_num_counter, + } + } + + // Spawn the stream that continuously reads the doc's updates from remote. + async fn observer_collab_message( + origin: CollabOrigin, + object: SyncObject, + mut stream: Stream, + weak_collab: Weak, + weak_sink: Weak>, + seq_num_counter: Arc, + last_init_sync: LastSyncTime, + ) { + while let Some(collab_message_result) = stream.next().await { + let collab = match weak_collab.upgrade() { + Some(collab) => collab, + None => break, // Collab dropped, stop the stream. + }; + + let sink = match weak_sink.upgrade() { + Some(sink) => sink, + None => break, // Sink dropped, stop the stream. 
+ }; + + let msg = match collab_message_result { + Ok(msg) => msg, + Err(err) => { + warn!( + "Stream error: {}, stop receive incoming changes", + err.into() + ); + break; + }, + }; + + if let Err(error) = ObserveCollab::::process_message( + &origin, + &object, + &collab, + &sink, + msg, + &seq_num_counter, + &last_init_sync, + ) + .await + { + if error.is_cannot_apply_update() { + // TODO(nathan): ask the client to resolve the conflict. + error!( + "collab:{} can not be synced because of error: {}", + object.object_id, error + ); + break; + } else { + error!("Error while processing message: {}", error); + } + } + } + } + + /// Continuously handle messages from the remote doc + async fn process_message( + origin: &CollabOrigin, + object: &SyncObject, + collab: &Arc, + sink: &Arc>, + msg: ServerCollabMessage, + seq_num_counter: &Arc, + last_init_time: &LastSyncTime, + ) -> Result<(), SyncError> { + // If server return the AckCode::ApplyInternalError, which means the server can not apply the + // update + if let ServerCollabMessage::ClientAck(ref ack) = msg { + if ack.code == AckCode::CannotApplyUpdate { + return Err(SyncError::CannotApplyUpdate(object.object_id.clone())); + } + } + + // msg_id will be None for [ServerBroadcast] or [ServerAwareness]. + match msg.msg_id() { + None => { + if let ServerCollabMessage::ServerBroadcast(ref data) = msg { + if let Err(err) = Self::validate_broadcast(object, data, seq_num_counter).await { + if err.is_missing_updates() { + Self::pull_missing_updates(origin, object, collab, sink, last_init_time).await; + return Ok(()); + } + } + } + Self::process_message_payload(&object.object_id, msg, collab, sink).await?; + sink.notify(); + Ok(()) + }, + Some(msg_id) => { + // Check if the message is acknowledged by the sink. + match sink.validate_response(msg_id, &msg, seq_num_counter).await { + Ok(is_valid) => { + if is_valid { + Self::process_message_payload(&object.object_id, msg, collab, sink).await?; + } + sink.notify(); + }, + Err(err) => { + // Update the last sync time if the message is valid. 
+ if err.is_missing_updates() { + Self::pull_missing_updates(origin, object, collab, sink, last_init_time).await; + } else { + error!("Error while validating response: {}", err); + } + }, + } + Ok(()) + }, + } + } + + async fn process_message_payload( + object_id: &str, + msg: ServerCollabMessage, + collab: &Arc, + sink: &Arc>, + ) -> Result<(), SyncError> { + if !msg.payload().is_empty() { + let msg_origin = msg.origin(); + ObserveCollab::::process_payload( + msg_origin, + msg.payload(), + object_id, + collab, + sink, + ) + .await?; + } + Ok(()) + } + + #[instrument(level = "trace", skip_all)] + async fn pull_missing_updates( + origin: &CollabOrigin, + object: &SyncObject, + collab: &Arc, + sink: &Arc>, + last_sync_time: &LastSyncTime, + ) { + let debounce_duration = if cfg!(debug_assertions) { + Duration::from_secs(2) + } else { + DEBOUNCE_DURATION + }; + + if sink.can_queue_init_sync() + && last_sync_time + .can_proceed_with_sync(debounce_duration) + .await + { + if let Some(lock_guard) = collab.try_lock() { + trace!("Start pull missing updates for {}", object.object_id); + start_sync(origin.clone(), object, &lock_guard, sink); + } + } + } + + async fn validate_broadcast( + object: &SyncObject, + broadcast_sync: &BroadcastSync, + seq_num_counter: &Arc, + ) -> Result<(), SyncError> { + check_update_contiguous(&object.object_id, broadcast_sync.seq_num, seq_num_counter)?; + Ok(()) + } + + async fn process_payload( + origin: &CollabOrigin, + payload: &Bytes, + object_id: &str, + collab: &Arc, + sink: &Arc>, + ) -> Result<(), SyncError> { + if let Some(mut collab) = collab.try_lock() { + let mut decoder = DecoderV1::new(Cursor::new(payload)); + let reader = MessageReader::new(&mut decoder); + for msg in reader { + let msg = msg?; + let is_server_sync_step_1 = matches!(msg, Message::Sync(SyncMessage::SyncStep1(_))); + if let Some(payload) = handle_message(origin, &ClientSyncProtocol, &mut collab, msg)? { + let object_id = object_id.to_string(); + sink.queue_msg(|msg_id| { + if is_server_sync_step_1 { + ClientCollabMessage::new_server_init_sync(ServerInit::new( + origin.clone(), + object_id, + payload, + msg_id, + )) + } else { + ClientCollabMessage::new_update_sync(UpdateSync::new( + origin.clone(), + object_id, + payload, + msg_id, + )) + } + }); + } + } + } + Ok(()) + } +} + +struct LastSyncTime { + last_sync: Mutex, +} + +impl LastSyncTime { + fn new() -> Self { + let now = Instant::now(); + let one_hour = Duration::from_secs(3600); + // Use checked_sub to safely attempt subtraction, falling back to 'now' if underflow would occur + let one_hour_ago = now.checked_sub(one_hour).unwrap_or(now); + + LastSyncTime { + last_sync: Mutex::new(one_hour_ago), + } + } + + async fn can_proceed_with_sync(&self, debounce_duration: Duration) -> bool { + let now = Instant::now(); + let mut last_sync_locked = self.last_sync.lock().await; + if now.duration_since(*last_sync_locked) > debounce_duration { + *last_sync_locked = now; + true + } else { + false + } + } +} + +/// Check if the update is contiguous. +/// +/// When the client sends updates to the server, the seq_num should increase; otherwise the +/// server might be missing some updates from the given client.
+pub(crate) fn check_update_contiguous( + object_id: &str, + current_seq_num: u32, + seq_num_counter: &Arc, +) -> Result<(), SyncError> { + let prev_seq_num = seq_num_counter.fetch_update(current_seq_num); + trace!( + "receive {} seq_num, prev:{}, current:{}", + object_id, + prev_seq_num, + current_seq_num, + ); + + // if the seq_num is 0, it means the client is just connected to the server. + if prev_seq_num == 0 && current_seq_num == 0 { + return Ok(()); + } + + if current_seq_num < prev_seq_num { + return Err(SyncError::Internal(anyhow!( + "{} invalid seq_num, prev:{}, current:{}", + object_id, + prev_seq_num, + current_seq_num, + ))); + } + + if current_seq_num > prev_seq_num + NUMBER_OF_UPDATE_TRIGGER_INIT_SYNC + || seq_num_counter.should_init_sync() + { + seq_num_counter.reset_counter(); + return Err(SyncError::MissingUpdates(format!( + "{} missing {} updates, should start init sync", + object_id, + current_seq_num - prev_seq_num, + ))); + } + Ok(()) +} + +#[derive(Default)] +pub struct SeqNumCounter { + pub counter: AtomicU32, + pub equal_counter: AtomicU32, +} + +impl SeqNumCounter { + pub fn fetch_update(&self, seq_num: u32) -> u32 { + match self + .counter + .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |current| { + if seq_num >= current { + Some(seq_num) + } else { + None + } + }) { + Ok(prev) => { + if prev == seq_num { + self.equal_counter.fetch_add(1, Ordering::SeqCst); + } else { + self.equal_counter.store(0, Ordering::SeqCst); + } + prev + }, + Err(prev) => prev, + } + } + + pub fn should_init_sync(&self) -> bool { + // when receive 8 continuous equal seq_num, we should start the init sync. + self.equal_counter.load(Ordering::SeqCst) >= 8 + } + + pub fn reset_counter(&self) { + self.equal_counter.store(0, Ordering::SeqCst); + } +} diff --git a/libs/client-api/src/collab_sync/mod.rs b/libs/client-api/src/collab_sync/mod.rs index 171b6659a..4c671f097 100644 --- a/libs/client-api/src/collab_sync/mod.rs +++ b/libs/client-api/src/collab_sync/mod.rs @@ -1,14 +1,15 @@ mod channel; +mod collab_sink; +mod collab_stream; mod error; +mod ping; mod plugin; -mod sink; -mod sink_config; mod sink_queue; mod sync_control; pub use channel::*; +pub use collab_rt_entity::{MsgId, ServerCollabMessage}; +pub use collab_sink::*; pub use error::*; pub use plugin::*; -pub use sink::*; -pub use sink_config::*; pub use sync_control::*; diff --git a/libs/client-api/src/collab_sync/ping.rs b/libs/client-api/src/collab_sync/ping.rs new file mode 100644 index 000000000..f7998c9d0 --- /dev/null +++ b/libs/client-api/src/collab_sync/ping.rs @@ -0,0 +1,66 @@ +use crate::collab_sync::sink_queue::SinkQueue; +use crate::collab_sync::{DefaultMsgIdCounter, SinkSignal, SyncTimestamp}; +use collab::core::origin::CollabOrigin; +use collab_rt_entity::{ClientCollabMessage, PingSync}; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::{Arc, Weak}; +use std::time::Duration; +use tokio::sync::watch; +use tracing::warn; + +pub struct PingSyncRunner; + +impl PingSyncRunner { + pub(crate) fn run( + origin: CollabOrigin, + object_id: String, + msg_id_counter: Arc, + message_queue: Weak>>, + pause: Arc, + weak_notify: Weak>, + sync_timestamp: Arc, + ) { + let duration = Duration::from_secs(10); + tokio::spawn(async move { + let mut interval = tokio::time::interval(duration); + loop { + interval.tick().await; + match message_queue.upgrade() { + None => break, + Some(message_queue) => { + if pause.load(Ordering::SeqCst) { + continue; + } else { + // Skip this iteration if a message was sent recently, within 
the specified duration. + if !sync_timestamp.is_time_for_next_sync(duration).await { + continue; + } + + if let Some(mut queue) = message_queue.try_lock() { + if !queue.is_empty() { + continue; + } + + let msg_id = msg_id_counter.next(); + let ping = PingSync { + origin: origin.clone(), + object_id: object_id.clone(), + msg_id, + }; + let ping = ClientCollabMessage::ClientPingSync(ping); + queue.push_msg(msg_id, ping); + + if let Some(notify) = weak_notify.upgrade() { + if let Err(err) = notify.send(SinkSignal::Proceed) { + warn!("{} fail to send notify signal: {}", object_id, err); + break; + } + } + } + } + }, + } + } + }); + } +} diff --git a/libs/client-api/src/collab_sync/plugin.rs b/libs/client-api/src/collab_sync/plugin.rs index 4a4180a0b..fe0bf83f0 100644 --- a/libs/client-api/src/collab_sync/plugin.rs +++ b/libs/client-api/src/collab_sync/plugin.rs @@ -1,6 +1,7 @@ use collab::core::awareness::{AwarenessUpdate, Event}; use std::sync::{Arc, Weak}; +use crate::collab_sync::{SinkConfig, SinkState, SyncControl}; use collab::core::collab::MutexCollab; use collab::core::collab_state::SyncState; use collab::core::origin::CollabOrigin; @@ -10,13 +11,9 @@ use collab_rt_entity::{ClientCollabMessage, ServerCollabMessage, UpdateSync}; use collab_rt_protocol::{Message, SyncMessage}; use futures_util::SinkExt; use tokio_stream::StreamExt; - -use crate::collab_sync::SyncControl; -use tokio_stream::wrappers::WatchStream; use tracing::trace; use crate::af_spawn; -use crate::collab_sync::sink_config::SinkConfig; use crate::ws::{ConnectState, WSConnectStateReceiver}; use yrs::updates::encoder::Encode; @@ -53,7 +50,7 @@ where pause: bool, mut ws_connect_state: WSConnectStateReceiver, ) -> Self { - let weak_local_collab = collab.clone(); + let _weak_local_collab = collab.clone(); let sync_queue = SyncControl::new( object.clone(), origin, @@ -64,16 +61,23 @@ where pause, ); - let mut sync_state_stream = WatchStream::new(sync_queue.subscribe_sync_state()); - af_spawn(async move { - while let Some(new_state) = sync_state_stream.next().await { - if let Some(local_collab) = weak_local_collab.upgrade() { - if let Some(local_collab) = local_collab.try_lock() { - local_collab.set_sync_state(new_state); + if let Some(local_collab) = collab.upgrade() { + let mut sync_state_stream = sync_queue.subscribe_sync_state(); + let weak_state = Arc::downgrade(local_collab.lock().get_state()); + af_spawn(async move { + while let Ok(sink_state) = sync_state_stream.recv().await { + if let Some(state) = weak_state.upgrade() { + let sync_state = match sink_state { + SinkState::Syncing => SyncState::Syncing, + _ => SyncState::SyncFinished, + }; + state.set_sync_state(sync_state); + } else { + break; } } - } - }); + }); + } let sync_queue = Arc::new(sync_queue); let weak_local_collab = collab; @@ -113,11 +117,6 @@ where channel, } } - - pub fn subscribe_sync_state(&self) -> WatchStream { - let rx = self.sync_queue.subscribe_sync_state(); - WatchStream::new(rx) - } } impl CollabPlugin for SyncPlugin diff --git a/libs/client-api/src/collab_sync/sink_config.rs b/libs/client-api/src/collab_sync/sink_config.rs deleted file mode 100644 index befa369dd..000000000 --- a/libs/client-api/src/collab_sync/sink_config.rs +++ /dev/null @@ -1,32 +0,0 @@ -use crate::collab_sync::DEFAULT_SYNC_TIMEOUT; -use std::time::Duration; - -pub struct SinkConfig { - /// `timeout` is the time to wait for the remote to ack the message. If the remote - /// does not ack the message in time, the message will be sent again. 
- pub send_timeout: Duration, - /// `maximum_payload_size` is the maximum size of the messages to be merged. - pub maximum_payload_size: usize, - /// Default is false. If true, the sink will not merge messages. - pub disable_merge_message: bool, -} - -impl SinkConfig { - pub fn new() -> Self { - Self::default() - } - pub fn send_timeout(mut self, secs: u64) -> Self { - self.send_timeout = Duration::from_secs(secs); - self - } -} - -impl Default for SinkConfig { - fn default() -> Self { - Self { - send_timeout: Duration::from_secs(DEFAULT_SYNC_TIMEOUT), - maximum_payload_size: 1024 * 10, - disable_merge_message: false, - } - } -} diff --git a/libs/client-api/src/collab_sync/sink_queue.rs b/libs/client-api/src/collab_sync/sink_queue.rs index afc383591..2ad952015 100644 --- a/libs/client-api/src/collab_sync/sink_queue.rs +++ b/libs/client-api/src/collab_sync/sink_queue.rs @@ -1,13 +1,11 @@ use anyhow::Error; +use collab_rt_entity::{MsgId, SinkMessage}; use std::cmp::Ordering; use std::collections::BinaryHeap; use std::ops::{Deref, DerefMut}; - -use collab_rt_entity::{MsgId, SinkMessage}; +use tracing::trace; pub(crate) struct SinkQueue { - #[allow(dead_code)] - uid: i64, queue: BinaryHeap>, } @@ -15,14 +13,14 @@ impl SinkQueue where Msg: SinkMessage, { - pub(crate) fn new(uid: i64) -> Self { + pub(crate) fn new() -> Self { Self { - uid, queue: Default::default(), } } pub(crate) fn push_msg(&mut self, msg_id: MsgId, msg: Msg) { + trace!("📩 queue: {}", msg); self.queue.push(QueueItem::new(msg, msg_id)); } } @@ -50,7 +48,6 @@ where #[derive(Debug, Clone)] pub(crate) struct QueueItem { inner: Msg, - // TODO(nathan): user inner's msg_id msg_id: MsgId, } diff --git a/libs/client-api/src/collab_sync/sync_control.rs b/libs/client-api/src/collab_sync/sync_control.rs index da1aec7bb..4277fa5ea 100644 --- a/libs/client-api/src/collab_sync/sync_control.rs +++ b/libs/client-api/src/collab_sync/sync_control.rs @@ -1,52 +1,36 @@ use crate::af_spawn; -use crate::collab_sync::sink_config::SinkConfig; -use crate::collab_sync::{ - CollabSink, CollabSinkRunner, SinkSignal, SinkState, SyncError, SyncObject, -}; -use bytes::Bytes; +use crate::collab_sync::collab_stream::ObserveCollab; +use crate::collab_sync::{CollabSink, CollabSinkRunner, SinkSignal, SinkState, SyncObject}; use collab::core::awareness::Awareness; use collab::core::collab::MutexCollab; -use collab::core::collab_state::SyncState; use collab::core::origin::CollabOrigin; use collab::preclude::Collab; - -use anyhow::anyhow; -use collab_rt_entity::{ - AckCode, BroadcastSync, ClientCollabMessage, InitSync, ServerCollabMessage, ServerInit, - UpdateSync, -}; -use collab_rt_protocol::{handle_message, ClientSyncProtocol, CollabSyncProtocol}; -use collab_rt_protocol::{Message, MessageReader, SyncMessage}; +use collab_rt_entity::{ClientCollabMessage, InitSync, ServerCollabMessage}; +use collab_rt_protocol::{ClientSyncProtocol, CollabSyncProtocol}; use futures_util::{SinkExt, StreamExt}; -use std::marker::PhantomData; use std::ops::Deref; -use std::sync::atomic::{AtomicU32, Ordering}; use std::sync::{Arc, Weak}; -use std::time::{Duration, Instant}; -use tokio::sync::{watch, Mutex}; -use tokio_stream::wrappers::WatchStream; -use tracing::{error, instrument, trace, warn}; -use yrs::encoding::read::Cursor; -use yrs::updates::decoder::DecoderV1; +use std::time::Duration; +use tokio::sync::{broadcast, watch}; + +use tracing::trace; use yrs::updates::encoder::{Encoder, EncoderV1}; pub const DEFAULT_SYNC_TIMEOUT: u64 = 10; pub const 
NUMBER_OF_UPDATE_TRIGGER_INIT_SYNC: u32 = 1; -const DEBOUNCE_DURATION: Duration = Duration::from_secs(10); - pub struct SyncControl { object: SyncObject, origin: CollabOrigin, /// The [CollabSink] is used to send the updates to the remote. It will send the current /// update periodically if the timeout is reached or it will send the next update if /// it receive previous ack from the remote. - sink: Arc>, + sink: Arc>, /// The [ObserveCollab] will be spawned in a separate task It continuously receive /// the updates from the remote. #[allow(dead_code)] observe_collab: ObserveCollab, - sync_state: Arc>, + sync_state_tx: broadcast::Sender, } impl Drop for SyncControl { @@ -73,8 +57,7 @@ where ) -> Self { let protocol = ClientSyncProtocol; let (notifier, notifier_rx) = watch::channel(SinkSignal::Proceed); - let sync_state = Arc::new(watch::channel(SyncState::InitSyncBegin).0); - let (sync_state_tx, sink_state_rx) = watch::channel(SinkState::Init); + let (sync_state_tx, _) = broadcast::channel(10); debug_assert!(origin.client_user_id().is_some()); // Create the sink and start the sink runner. @@ -83,7 +66,7 @@ where object.clone(), sink, notifier, - sync_state_tx, + sync_state_tx.clone(), sink_config, pause, )); @@ -100,34 +83,34 @@ where Arc::downgrade(&sink), ); - let weak_sync_state = Arc::downgrade(&sync_state); - let mut sink_state_stream = WatchStream::new(sink_state_rx); - // Subscribe the sink state stream and update the sync state in the background. - af_spawn(async move { - while let Some(collab_state) = sink_state_stream.next().await { - if let Some(sync_state) = weak_sync_state.upgrade() { - match collab_state { - SinkState::Syncing => { - let _ = sync_state.send(SyncState::Syncing); - }, - SinkState::Finished => { - let _ = sync_state.send(SyncState::SyncFinished); - }, - SinkState::Init => { - let _ = sync_state.send(SyncState::InitSyncBegin); - }, - SinkState::Pause => {}, - } - } - } - }); + // let weak_sync_state = Arc::downgrade(&sync_state); + // let mut sink_state_stream = WatchStream::new(sink_state_rx); + // // Subscribe the sink state stream and update the sync state in the background. 
+ // af_spawn(async move { + // while let Some(collab_state) = sink_state_stream.next().await { + // if let Some(sync_state) = weak_sync_state.upgrade() { + // match collab_state { + // SinkState::Syncing => { + // let _ = sync_state.send(SyncState::Syncing); + // }, + // SinkState::Finished => { + // let _ = sync_state.send(SyncState::SyncFinished); + // }, + // SinkState::Init => { + // let _ = sync_state.send(SyncState::InitSyncBegin); + // }, + // SinkState::Pause => {}, + // } + // } + // } + // }); Self { object, origin, sink, observe_collab: stream, - sync_state, + sync_state_tx, } } @@ -141,8 +124,8 @@ where self.sink.resume(); } - pub fn subscribe_sync_state(&self) -> watch::Receiver { - self.sync_state.subscribe() + pub fn subscribe_sync_state(&self) -> broadcast::Receiver { + self.sync_state_tx.subscribe() } pub fn init_sync(&self, collab: &Collab) { @@ -169,7 +152,7 @@ pub fn start_sync( origin: CollabOrigin, sync_object: &SyncObject, collab: &Collab, - sink: &Arc>, + sink: &Arc>, ) where E: Into + Send + Sync + 'static, Sink: SinkExt, Error = E> + Send + Sync + Unpin + 'static, @@ -186,350 +169,43 @@ pub fn start_sync( msg_id, payload, ); + ClientCollabMessage::new_init_sync(init_sync) }) - } else { - sink.notify(); } } impl Deref for SyncControl { - type Target = Arc>; + type Target = Arc>; fn deref(&self) -> &Self::Target { &self.sink } } -/// Use to continuously receive updates from remote. -struct ObserveCollab { - object_id: String, - #[allow(dead_code)] - weak_collab: Weak, - phantom_sink: PhantomData, - phantom_stream: PhantomData, - // Use sequence number to check if the received updates/broadcasts are continuous. - #[allow(dead_code)] - seq_num_counter: Arc, -} - -impl Drop for ObserveCollab { - fn drop(&mut self) { - trace!("Drop SyncStream {}", self.object_id); - } +pub struct SinkConfig { + /// `timeout` is the time to wait for the remote to ack the message. If the remote + /// does not ack the message in time, the message will be sent again. + pub send_timeout: Duration, + /// `maximum_payload_size` is the maximum size of the messages to be merged. + pub maximum_payload_size: usize, } -impl ObserveCollab -where - E: Into + Send + Sync + 'static, - Sink: SinkExt, Error = E> + Send + Sync + Unpin + 'static, - Stream: StreamExt> + Send + Sync + Unpin + 'static, -{ - pub fn new( - origin: CollabOrigin, - object: SyncObject, - stream: Stream, - weak_collab: Weak, - sink: Weak>, - ) -> Self { - let seq_num_counter = Arc::new(AtomicU32::new(0)); - let last_init_sync = LastSyncTime::new(); - let object_id = object.object_id.clone(); - let cloned_weak_collab = weak_collab.clone(); - let cloned_seq_num_counter = seq_num_counter.clone(); - af_spawn(ObserveCollab::::observer_collab_message( - origin, - object, - stream, - cloned_weak_collab, - sink, - cloned_seq_num_counter, - last_init_sync, - )); - Self { - object_id, - weak_collab, - phantom_sink: Default::default(), - phantom_stream: Default::default(), - seq_num_counter, - } - } - - // Spawn the stream that continuously reads the doc's updates from remote. - async fn observer_collab_message( - origin: CollabOrigin, - object: SyncObject, - mut stream: Stream, - weak_collab: Weak, - weak_sink: Weak>, - seq_num_counter: Arc, - last_init_sync: LastSyncTime, - ) { - while let Some(collab_message_result) = stream.next().await { - let collab = match weak_collab.upgrade() { - Some(collab) => collab, - None => break, // Collab dropped, stop the stream. 
- }; - - let sink = match weak_sink.upgrade() { - Some(sink) => sink, - None => break, // Sink dropped, stop the stream. - }; - - let msg = match collab_message_result { - Ok(msg) => msg, - Err(err) => { - warn!( - "Stream error: {}, stop receive incoming changes", - err.into() - ); - break; - }, - }; - - if let Err(error) = ObserveCollab::::process_message( - &origin, - &object, - &collab, - &sink, - msg, - &seq_num_counter, - &last_init_sync, - ) - .await - { - if error.is_cannot_apply_update() { - // TODO(nathan): ask the client to resolve the conflict. - error!( - "collab:{} can not be synced because of error: {}", - object.object_id, error - ); - break; - } else { - error!("Error while processing message: {}", error); - } - } - } - } - - /// Continuously handle messages from the remote doc - async fn process_message( - origin: &CollabOrigin, - object: &SyncObject, - collab: &Arc, - sink: &Arc>, - msg: ServerCollabMessage, - seq_num_counter: &Arc, - last_init_time: &LastSyncTime, - ) -> Result<(), SyncError> { - // If server return the AckCode::ApplyInternalError, which means the server can not apply the - // update - if let ServerCollabMessage::ClientAck(ref ack) = msg { - if ack.code == AckCode::CannotApplyUpdate { - return Err(SyncError::CannotApplyUpdate(object.object_id.clone())); - } - } - - // msg_id will be None for [ServerBroadcast] or [ServerAwareness]. - match msg.msg_id() { - None => { - if let ServerCollabMessage::ServerBroadcast(ref data) = msg { - if let Err(err) = Self::validate_broadcast(object, data, seq_num_counter).await { - if err.is_missing_updates() { - Self::pull_missing_updates(origin, object, collab, sink, last_init_time).await; - return Ok(()); - } - } - } - Self::process_message_payload(&object.object_id, msg, collab, sink).await?; - sink.notify(); - Ok(()) - }, - Some(msg_id) => { - // Check if the message is acknowledged by the sink. - match sink.validate_response(msg_id, &msg, seq_num_counter).await { - Ok(is_valid) => { - if is_valid { - Self::process_message_payload(&object.object_id, msg, collab, sink).await?; - sink.notify(); - } - }, - Err(err) => { - // Update the last sync time if the message is valid. 
- if err.is_missing_updates() { - Self::pull_missing_updates(origin, object, collab, sink, last_init_time).await; - } else { - error!("Error while validating response: {}", err); - } - }, - } - Ok(()) - }, - } - } - - async fn process_message_payload( - object_id: &str, - msg: ServerCollabMessage, - collab: &Arc, - sink: &Arc>, - ) -> Result<(), SyncError> { - if !msg.payload().is_empty() { - let msg_origin = msg.origin(); - ObserveCollab::::process_payload( - msg_origin, - msg.payload(), - object_id, - collab, - sink, - ) - .await?; - } - Ok(()) +impl SinkConfig { + pub fn new() -> Self { + Self::default() } - - #[instrument(level = "trace", skip_all)] - async fn pull_missing_updates( - origin: &CollabOrigin, - object: &SyncObject, - collab: &Arc, - sink: &Arc>, - last_sync_time: &LastSyncTime, - ) { - let debounce_duration = if cfg!(debug_assertions) { - Duration::from_secs(2) - } else { - DEBOUNCE_DURATION - }; - - if sink.can_queue_init_sync() - && last_sync_time - .can_proceed_with_sync(debounce_duration) - .await - { - if let Some(lock_guard) = collab.try_lock() { - start_sync(origin.clone(), object, &lock_guard, sink); - } - } - } - - async fn validate_broadcast( - object: &SyncObject, - broadcast_sync: &BroadcastSync, - seq_num_counter: &Arc, - ) -> Result<(), SyncError> { - let prev_seq_num = seq_num_counter - .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| { - // must return Some to update the value - Some(broadcast_sync.seq_num) - }) - .unwrap(); - - check_update_contiguous(&object.object_id, broadcast_sync.seq_num, prev_seq_num)?; - - Ok(()) - } - - async fn process_payload( - origin: &CollabOrigin, - payload: &Bytes, - object_id: &str, - collab: &Arc, - sink: &Arc>, - ) -> Result<(), SyncError> { - if let Some(mut collab) = collab.try_lock() { - let mut decoder = DecoderV1::new(Cursor::new(payload)); - let reader = MessageReader::new(&mut decoder); - for msg in reader { - let msg = msg?; - let is_server_sync_step_1 = matches!(msg, Message::Sync(SyncMessage::SyncStep1(_))); - if let Some(payload) = handle_message(origin, &ClientSyncProtocol, &mut collab, msg)? { - let object_id = object_id.to_string(); - sink.queue_msg(|msg_id| { - if is_server_sync_step_1 { - ClientCollabMessage::new_server_init_sync(ServerInit::new( - origin.clone(), - object_id, - payload, - msg_id, - )) - } else { - ClientCollabMessage::new_update_sync(UpdateSync::new( - origin.clone(), - object_id, - payload, - msg_id, - )) - } - }); - } - } - } - Ok(()) + pub fn send_timeout(mut self, secs: u64) -> Self { + self.send_timeout = Duration::from_secs(secs); + self } } -struct LastSyncTime { - last_sync: Mutex, -} - -impl LastSyncTime { - fn new() -> Self { - let now = Instant::now(); - let one_hour = Duration::from_secs(3600); - // Use checked_sub to safely attempt subtraction, falling back to 'now' if underflow would occur - let one_hour_ago = now.checked_sub(one_hour).unwrap_or(now); - - LastSyncTime { - last_sync: Mutex::new(one_hour_ago), - } - } - - async fn can_proceed_with_sync(&self, debounce_duration: Duration) -> bool { - let now = Instant::now(); - let mut last_sync_locked = self.last_sync.lock().await; - if now.duration_since(*last_sync_locked) > debounce_duration { - *last_sync_locked = now; - true - } else { - false +impl Default for SinkConfig { + fn default() -> Self { + Self { + send_timeout: Duration::from_secs(DEFAULT_SYNC_TIMEOUT), + maximum_payload_size: 1024 * 10, } } } - -/// Check if the update is contiguous. 
-pub fn check_update_contiguous( - object_id: &str, - current_seq_num: u32, - prev_seq_num: u32, -) -> Result<(), SyncError> { - trace!( - "receive {} seq_num, prev:{}, current:{}", - object_id, - prev_seq_num, - current_seq_num, - ); - - // If the previous seq_num is 0, which means the doc is not synced before. - if prev_seq_num == 0 { - return Ok(()); - } - - if current_seq_num < prev_seq_num { - return Err(SyncError::Internal(anyhow!( - "{} invalid seq_num, prev:{}, current:{}", - object_id, - prev_seq_num, - current_seq_num, - ))); - } - - if current_seq_num > prev_seq_num + NUMBER_OF_UPDATE_TRIGGER_INIT_SYNC { - return Err(SyncError::MissingUpdates(format!( - "{} missing {} updates, should start init sync", - object_id, - current_seq_num - prev_seq_num, - ))); - } - Ok(()) -} diff --git a/libs/client-api/src/ws/client.rs b/libs/client-api/src/ws/client.rs index f60af2593..2784b3d33 100644 --- a/libs/client-api/src/ws/client.rs +++ b/libs/client-api/src/ws/client.rs @@ -434,7 +434,7 @@ fn handle_collab_message( if let Some(channels) = collab_channels.read().get(&object_id) { for channel in channels.iter() { if let Some(channel) = channel.upgrade() { - trace!("🌐receive server message: {}", collab_msg); + trace!("🌐receive server: {}", collab_msg); channel.forward_to_stream(collab_msg.clone()); } } diff --git a/libs/collab-rt-entity/src/client_message.rs b/libs/collab-rt-entity/src/client_message.rs index 3339928a8..fd4bd87cd 100644 --- a/libs/collab-rt-entity/src/client_message.rs +++ b/libs/collab-rt-entity/src/client_message.rs @@ -20,6 +20,7 @@ pub trait SinkMessage: Clone + Send + Sync + 'static + Ord + Display { fn is_client_init_sync(&self) -> bool; fn is_server_init_sync(&self) -> bool; fn is_update_sync(&self) -> bool; + fn is_ping_sync(&self) -> bool; } #[derive(Clone, Debug, Serialize, Deserialize)] pub enum ClientCollabMessage { @@ -27,6 +28,7 @@ pub enum ClientCollabMessage { ClientUpdateSync { data: UpdateSync }, ServerInitSync(ServerInit), ClientAwarenessSync(UpdateSync), + ClientPingSync(PingSync), } impl ClientCollabMessage { @@ -51,6 +53,7 @@ impl ClientCollabMessage { ClientCollabMessage::ClientUpdateSync { data, .. } => data.payload.len(), ClientCollabMessage::ServerInitSync(msg) => msg.payload.len(), ClientCollabMessage::ClientAwarenessSync(data) => data.payload.len(), + ClientCollabMessage::ClientPingSync(_) => 0, } } pub fn object_id(&self) -> &str { @@ -59,6 +62,7 @@ impl ClientCollabMessage { ClientCollabMessage::ClientUpdateSync { data, .. } => &data.object_id, ClientCollabMessage::ServerInitSync(msg) => &msg.object_id, ClientCollabMessage::ClientAwarenessSync(data) => &data.object_id, + ClientCollabMessage::ClientPingSync(data) => &data.object_id, } } @@ -68,14 +72,17 @@ impl ClientCollabMessage { ClientCollabMessage::ClientUpdateSync { data, .. } => &data.origin, ClientCollabMessage::ServerInitSync(msg) => &msg.origin, ClientCollabMessage::ClientAwarenessSync(data) => &data.origin, + ClientCollabMessage::ClientPingSync(data) => &data.origin, } } pub fn payload(&self) -> &Bytes { + static EMPTY_BYTES: Bytes = Bytes::from_static(b""); match self { ClientCollabMessage::ClientInitSync { data, .. } => &data.payload, ClientCollabMessage::ClientUpdateSync { data, .. 
} => &data.payload, ClientCollabMessage::ServerInitSync(msg) => &msg.payload, ClientCollabMessage::ClientAwarenessSync(data) => &data.payload, + ClientCollabMessage::ClientPingSync(_) => &EMPTY_BYTES, } } pub fn device_id(&self) -> Option { @@ -91,6 +98,7 @@ impl ClientCollabMessage { ClientCollabMessage::ClientUpdateSync { data, .. } => data.msg_id, ClientCollabMessage::ServerInitSync(value) => value.msg_id, ClientCollabMessage::ClientAwarenessSync(data) => data.msg_id, + ClientCollabMessage::ClientPingSync(data) => data.msg_id, } } @@ -112,6 +120,7 @@ impl Display for ClientCollabMessage { data.msg_id, data.payload.len(), )), + ClientCollabMessage::ClientPingSync(data) => Display::fmt(data, f), } } } @@ -177,6 +186,9 @@ impl SinkMessage for ClientCollabMessage { fn is_update_sync(&self) -> bool { matches!(self, ClientCollabMessage::ClientUpdateSync { .. }) } + fn is_ping_sync(&self) -> bool { + matches!(self, ClientCollabMessage::ClientPingSync { .. }) + } } impl Hash for ClientCollabMessage { @@ -345,3 +357,21 @@ impl Display for UpdateSync { )) } } + +#[derive(Clone, Eq, PartialEq, Debug, Serialize, Deserialize, Hash)] +pub struct PingSync { + pub origin: CollabOrigin, + pub object_id: String, + pub msg_id: MsgId, +} + +impl Display for PingSync { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_fmt(format_args!( + "ping: [uid:{}|oid:{}|msg_id:{:?}]", + self.origin.client_user_id().unwrap_or(0), + self.object_id, + self.msg_id, + )) + } +} diff --git a/libs/collab-rt-entity/src/message.rs b/libs/collab-rt-entity/src/message.rs index c417dc1c1..20cc2ff3c 100644 --- a/libs/collab-rt-entity/src/message.rs +++ b/libs/collab-rt-entity/src/message.rs @@ -174,15 +174,15 @@ pub enum CollabMessage { ServerBroadcast(BroadcastSync), } -impl crate::CollabMessage { - pub fn msg_id(&self) -> Option { +impl CollabMessage { + pub fn msg_id(&self) -> Option { match self { - crate::CollabMessage::ClientInitSync(value) => Some(value.msg_id), - crate::CollabMessage::ClientUpdateSync(value) => Some(value.msg_id), - crate::CollabMessage::ClientAck(value) => Some(value.msg_id), - crate::CollabMessage::ServerInitSync(value) => Some(value.msg_id), - crate::CollabMessage::ServerBroadcast(_) => None, - crate::CollabMessage::AwarenessSync(_) => None, + CollabMessage::ClientInitSync(value) => Some(value.msg_id), + CollabMessage::ClientUpdateSync(value) => Some(value.msg_id), + CollabMessage::ClientAck(value) => Some(value.msg_id), + CollabMessage::ServerInitSync(value) => Some(value.msg_id), + CollabMessage::ServerBroadcast(_) => None, + CollabMessage::AwarenessSync(_) => None, } } @@ -191,12 +191,12 @@ impl crate::CollabMessage { } pub fn payload(&self) -> &Bytes { match self { - crate::CollabMessage::ClientInitSync(value) => &value.payload, - crate::CollabMessage::ClientUpdateSync(value) => &value.payload, - crate::CollabMessage::ClientAck(value) => &value.payload, - crate::CollabMessage::ServerInitSync(value) => &value.payload, - crate::CollabMessage::ServerBroadcast(value) => &value.payload, - crate::CollabMessage::AwarenessSync(value) => &value.payload, + CollabMessage::ClientInitSync(value) => &value.payload, + CollabMessage::ClientUpdateSync(value) => &value.payload, + CollabMessage::ClientAck(value) => &value.payload, + CollabMessage::ServerInitSync(value) => &value.payload, + CollabMessage::ServerBroadcast(value) => &value.payload, + CollabMessage::AwarenessSync(value) => &value.payload, } } pub fn is_empty(&self) -> bool { @@ -204,12 +204,12 @@ impl crate::CollabMessage { } pub fn 
origin(&self) -> &CollabOrigin { match self { - crate::CollabMessage::ClientInitSync(value) => &value.origin, - crate::CollabMessage::ClientUpdateSync(value) => &value.origin, - crate::CollabMessage::ClientAck(value) => &value.origin, - crate::CollabMessage::ServerInitSync(value) => &value.origin, - crate::CollabMessage::ServerBroadcast(value) => &value.origin, - crate::CollabMessage::AwarenessSync(value) => &value.origin, + CollabMessage::ClientInitSync(value) => &value.origin, + CollabMessage::ClientUpdateSync(value) => &value.origin, + CollabMessage::ClientAck(value) => &value.origin, + CollabMessage::ServerInitSync(value) => &value.origin, + CollabMessage::ServerBroadcast(value) => &value.origin, + CollabMessage::AwarenessSync(value) => &value.origin, } } @@ -219,83 +219,66 @@ impl crate::CollabMessage { pub fn object_id(&self) -> &str { match self { - crate::CollabMessage::ClientInitSync(value) => &value.object_id, - crate::CollabMessage::ClientUpdateSync(value) => &value.object_id, - crate::CollabMessage::ClientAck(value) => &value.object_id, - crate::CollabMessage::ServerInitSync(value) => &value.object_id, - crate::CollabMessage::ServerBroadcast(value) => &value.object_id, - crate::CollabMessage::AwarenessSync(value) => &value.object_id, + CollabMessage::ClientInitSync(value) => &value.object_id, + CollabMessage::ClientUpdateSync(value) => &value.object_id, + CollabMessage::ClientAck(value) => &value.object_id, + CollabMessage::ServerInitSync(value) => &value.object_id, + CollabMessage::ServerBroadcast(value) => &value.object_id, + CollabMessage::AwarenessSync(value) => &value.object_id, } } } -impl Display for crate::CollabMessage { +impl Display for CollabMessage { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self { - crate::CollabMessage::ClientInitSync(value) => Display::fmt(&value, f), - crate::CollabMessage::ClientUpdateSync(value) => Display::fmt(&value, f), - crate::CollabMessage::ClientAck(value) => Display::fmt(&value, f), - crate::CollabMessage::ServerInitSync(value) => Display::fmt(&value, f), - crate::CollabMessage::ServerBroadcast(value) => Display::fmt(&value, f), - crate::CollabMessage::AwarenessSync(value) => Display::fmt(&value, f), + CollabMessage::ClientInitSync(value) => Display::fmt(&value, f), + CollabMessage::ClientUpdateSync(value) => Display::fmt(&value, f), + CollabMessage::ClientAck(value) => Display::fmt(&value, f), + CollabMessage::ServerInitSync(value) => Display::fmt(&value, f), + CollabMessage::ServerBroadcast(value) => Display::fmt(&value, f), + CollabMessage::AwarenessSync(value) => Display::fmt(&value, f), } } } -impl From for crate::CollabMessage { - fn from(value: ClientCollabMessage) -> Self { - match value { - ClientCollabMessage::ClientInitSync { data, .. } => { - crate::CollabMessage::ClientInitSync(data) - }, - ClientCollabMessage::ClientUpdateSync { data, .. 
} => { - crate::CollabMessage::ClientUpdateSync(data) - }, - ClientCollabMessage::ServerInitSync(data) => crate::CollabMessage::ServerInitSync(data), - ClientCollabMessage::ClientAwarenessSync(data) => { - crate::CollabMessage::ClientUpdateSync(data) - }, - } - } -} - -impl From for crate::CollabMessage { +impl From for CollabMessage { fn from(value: CollabAck) -> Self { - crate::CollabMessage::ClientAck(value) + CollabMessage::ClientAck(value) } } -impl From for crate::CollabMessage { +impl From for CollabMessage { fn from(value: BroadcastSync) -> Self { - crate::CollabMessage::ServerBroadcast(value) + CollabMessage::ServerBroadcast(value) } } -impl From for crate::CollabMessage { +impl From for CollabMessage { fn from(value: InitSync) -> Self { - crate::CollabMessage::ClientInitSync(value) + CollabMessage::ClientInitSync(value) } } -impl From for crate::CollabMessage { +impl From for CollabMessage { fn from(value: UpdateSync) -> Self { - crate::CollabMessage::ClientUpdateSync(value) + CollabMessage::ClientUpdateSync(value) } } -impl From for crate::CollabMessage { +impl From for CollabMessage { fn from(value: AwarenessSync) -> Self { - crate::CollabMessage::AwarenessSync(value) + CollabMessage::AwarenessSync(value) } } -impl From for crate::CollabMessage { +impl From for CollabMessage { fn from(value: ServerInit) -> Self { - crate::CollabMessage::ServerInitSync(value) + CollabMessage::ServerInitSync(value) } } -impl TryFrom for crate::CollabMessage { +impl TryFrom for CollabMessage { type Error = anyhow::Error; fn try_from(value: RealtimeMessage) -> Result { @@ -306,8 +289,8 @@ impl TryFrom for crate::CollabMessage { } } -impl From for RealtimeMessage { - fn from(msg: crate::CollabMessage) -> Self { +impl From for RealtimeMessage { + fn from(msg: CollabMessage) -> Self { Self::Collab(msg) } } diff --git a/libs/collab-rt-entity/src/server_message.rs b/libs/collab-rt-entity/src/server_message.rs index 766d42213..43f3b39d1 100644 --- a/libs/collab-rt-entity/src/server_message.rs +++ b/libs/collab-rt-entity/src/server_message.rs @@ -292,8 +292,7 @@ impl BroadcastSync { impl Display for BroadcastSync { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.write_fmt(format_args!( - "broadcast: [uid:{}|oid:{}|len:{}]", - self.origin.client_user_id().unwrap_or(0), + "broadcast: [oid:{}|len:{}]", self.object_id, self.payload.len(), )) diff --git a/libs/collab-rt/src/client_msg_router.rs b/libs/collab-rt/src/client_msg_router.rs index 4dfe80dd4..fc00055da 100644 --- a/libs/collab-rt/src/client_msg_router.rs +++ b/libs/collab-rt/src/client_msg_router.rs @@ -2,7 +2,7 @@ use crate::util::channel_ext::UnboundedSenderSink; use crate::RealtimeAccessControl; use async_trait::async_trait; use collab_rt_entity::user::RealtimeUser; -use collab_rt_entity::{ClientCollabMessage, SinkMessage}; +use collab_rt_entity::ClientCollabMessage; use collab_rt_entity::{MessageByObjectId, RealtimeMessage}; use std::sync::Arc; use std::time::Duration; @@ -180,17 +180,8 @@ impl ClientMessageRouter { let mut valid_messages = Vec::with_capacity(messages.len()); let mut invalid_messages = Vec::with_capacity(messages.len()); - let can_read = access_control - .can_read_collab(workspace_id, uid, object_id) - .await - .unwrap_or(false); for message in messages { - if message.is_client_init_sync() && can_read { - valid_messages.push(message); - continue; - } - if can_write { valid_messages.push(message); } else { diff --git a/libs/collab-rt/src/collaborate/group_broadcast.rs 
b/libs/collab-rt/src/collaborate/group_broadcast.rs index b6d4aeff0..c1258792a 100644 --- a/libs/collab-rt/src/collaborate/group_broadcast.rs +++ b/libs/collab-rt/src/collaborate/group_broadcast.rs @@ -24,8 +24,8 @@ use crate::error::RealtimeError; use crate::metrics::CollabMetricsCalculate; use collab_rt_entity::user::RealtimeUser; -use collab_rt_entity::AckCode; use collab_rt_entity::MessageByObjectId; +use collab_rt_entity::{AckCode, MsgId}; use collab_rt_entity::{ AwarenessSync, BroadcastSync, ClientCollabMessage, CollabAck, CollabMessage, }; @@ -325,9 +325,6 @@ async fn handle_client_messages( }, } } - - // update the last sync time - update_last_sync_at(&collab); } } @@ -339,9 +336,47 @@ async fn handle_one_client_message( metrics_calculate: &CollabMetricsCalculate, seq_num_counter: &Rc, ) -> Result { - trace!("Applying client updates: {}", collab_msg); - let mut decoder = DecoderV1::from(collab_msg.payload().as_ref()); - let origin = collab_msg.origin().clone(); + let msg_id = collab_msg.msg_id(); + let message_origin = collab_msg.origin().clone(); + let seq_num = seq_num_counter.load(Ordering::SeqCst); + + // If the payload is empty, we don't need to apply any updates to the document. + // Currently, only the ping message should has an empty payload. + if collab_msg.payload().is_empty() { + if !matches!(collab_msg, ClientCollabMessage::ClientPingSync(_)) { + error!("receive unexpected empty payload message:{}", collab_msg); + } + let resp = CollabAck::new(message_origin, object_id.to_string(), msg_id, seq_num); + Ok(resp) + } else { + trace!("Applying client updates: {}", collab_msg); + let ack = handle_one_message_payload( + object_id, + message_origin, + msg_id, + collab_msg.payload(), + collab, + metrics_calculate, + seq_num, + ) + .await?; + + update_last_sync_at(collab); + Ok(ack) + } +} + +/// Handle the message sent from the client +async fn handle_one_message_payload( + object_id: &str, + origin: CollabOrigin, + msg_id: MsgId, + payload: &[u8], + collab: &Mutex, + metrics_calculate: &CollabMetricsCalculate, + seq_num: u32, +) -> Result { + let mut decoder = DecoderV1::from(payload); let reader = MessageReader::new(&mut decoder); let mut ack_response = None; @@ -364,7 +399,6 @@ async fn handle_one_client_message( }, }; - let seq_num = seq_num_counter.load(Ordering::SeqCst); for msg in reader { match msg { Ok(msg) => { @@ -377,13 +411,8 @@ async fn handle_one_client_message( // One ClientCollabMessage can have multiple Yrs [Message] in it, but we only need to // send one ack back to the client. 
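The empty-payload branch above is the server half of the new ping flow: a ClientPingSync carries no update data, so the broadcast group skips update decoding entirely and only answers with a CollabAck that reports the current broadcast sequence number. A minimal sketch of that short-circuit, using simplified stand-in types (Ack, ack_for_message) rather than the crate's real CollabAck API:

use std::sync::atomic::{AtomicU32, Ordering};

#[derive(Debug)]
struct Ack {
    object_id: String,
    msg_id: u64,
    seq_num: u32,
}

fn ack_for_message(
    object_id: &str,
    msg_id: u64,
    payload: &[u8],
    seq_num_counter: &AtomicU32,
) -> Ack {
    let seq_num = seq_num_counter.load(Ordering::SeqCst);
    if payload.is_empty() {
        // Ping: nothing to apply to the document; just report the latest
        // broadcast sequence number back to the sender.
        return Ack { object_id: object_id.to_string(), msg_id, seq_num };
    }
    // Non-empty payload: decode and apply the Yrs updates here (omitted in
    // this sketch), then ack with the same sequence number.
    Ack { object_id: object_id.to_string(), msg_id, seq_num }
}

fn main() {
    let counter = AtomicU32::new(7);
    // An empty payload (a ping) is acked without touching the collab.
    println!("{:?}", ack_for_message("object-1", 1, &[], &counter));
}

The acked seq_num is what lets the client compare its own view of the broadcast stream against the server's and decide whether it has fallen behind.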
if ack_response.is_none() { - let resp = CollabAck::new( - origin.clone(), - object_id.to_string(), - collab_msg.msg_id(), - seq_num, - ) - .with_payload(payload.unwrap_or_default()); + let resp = CollabAck::new(origin.clone(), object_id.to_string(), msg_id, seq_num) + .with_payload(payload.unwrap_or_default()); ack_response = Some(resp); } }, @@ -393,13 +422,8 @@ async fn handle_one_client_message( .fetch_add(1, Ordering::Relaxed); error!("handle collab:{} message error:{}", object_id, err); if ack_response.is_none() { - let resp = CollabAck::new( - origin.clone(), - object_id.to_string(), - collab_msg.msg_id(), - seq_num, - ) - .with_code(ack_code_from_error(&err)); + let resp = CollabAck::new(origin.clone(), object_id.to_string(), msg_id, seq_num) + .with_code(ack_code_from_error(&err)); ack_response = Some(resp); } break; @@ -479,7 +503,7 @@ fn gen_update_message(update: &[u8]) -> Vec { } #[inline] -fn update_last_sync_at(collab: &Rc>) { +fn update_last_sync_at(collab: &Mutex) { if let Ok(collab) = collab.try_lock() { collab.set_last_sync_at(chrono::Utc::now().timestamp()); } diff --git a/tests/collab/edit_permission.rs b/tests/collab/edit_permission.rs index 45ba1e58c..6998620d6 100644 --- a/tests/collab/edit_permission.rs +++ b/tests/collab/edit_permission.rs @@ -45,115 +45,115 @@ async fn recv_updates_without_permission_test() { assert_client_collab_within_secs(&mut client_2, &object_id, "name", json!({}), 60).await; } -#[tokio::test] -async fn recv_remote_updates_with_readonly_permission_test() { - let collab_type = CollabType::Document; - let mut client_1 = TestClient::new_user().await; - let mut client_2 = TestClient::new_user().await; - - let workspace_id = client_1.workspace_id().await; - let object_id = client_1 - .create_and_edit_collab(&workspace_id, collab_type.clone()) - .await; - - // Add client 2 as the member of the collab then the client 2 will receive the update. 
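The permission tests in this file lean on helpers such as assert_client_collab_within_secs and assert_client_collab_include_value, which poll the client's collab state until the expected value appears or a deadline passes. A rough sketch of that retry pattern, with a hypothetical assert_eventually helper and check closure standing in for the real collab lookup:

use std::time::{Duration, Instant};

// Hypothetical helper: poll `check` until it returns true or `timeout` elapses.
async fn assert_eventually<F>(mut check: F, timeout: Duration) -> Result<(), String>
where
    F: FnMut() -> bool,
{
    let deadline = Instant::now() + timeout;
    loop {
        if check() {
            return Ok(());
        }
        if Instant::now() >= deadline {
            return Err("value did not converge before the deadline".to_string());
        }
        tokio::time::sleep(Duration::from_millis(500)).await;
    }
}

#[tokio::main]
async fn main() {
    let mut polls = 0;
    // Stand-in check; the real tests read the collab's JSON and compare it
    // against the expected value.
    let result = assert_eventually(
        || {
            polls += 1;
            polls > 3
        },
        Duration::from_secs(60),
    )
    .await;
    assert!(result.is_ok());
}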
- client_1 - .add_collab_member( - &workspace_id, - &object_id, - &client_2, - AFAccessLevel::ReadOnly, - ) - .await; - - client_2 - .open_collab(&workspace_id, &object_id, collab_type.clone()) - .await; - - // Edit the collab from client 1 and then the server will broadcast to client 2 - client_1 - .collabs - .get_mut(&object_id) - .unwrap() - .collab - .lock() - .insert("name", "AppFlowy"); - client_1 - .wait_object_sync_complete(&object_id) - .await - .unwrap(); - - let expected = json!({ - "name": "AppFlowy" - }); - assert_client_collab_within_secs(&mut client_2, &object_id, "name", expected.clone(), 60).await; - assert_server_collab( - &workspace_id, - &mut client_1.api_client, - &object_id, - &collab_type, - 10, - expected, - ) - .await - .unwrap(); -} - -#[tokio::test] -async fn init_sync_with_readonly_permission_test() { - let collab_type = CollabType::Document; - let mut client_1 = TestClient::new_user().await; - let mut client_2 = TestClient::new_user().await; - - let workspace_id = client_1.workspace_id().await; - let object_id = client_1 - .create_and_edit_collab(&workspace_id, collab_type.clone()) - .await; - client_1 - .collabs - .get_mut(&object_id) - .unwrap() - .collab - .lock() - .insert("name", "AppFlowy"); - client_1 - .wait_object_sync_complete(&object_id) - .await - .unwrap(); - sleep(Duration::from_secs(2)).await; - - // - let expected = json!({ - "name": "AppFlowy" - }); - assert_server_collab( - &workspace_id, - &mut client_1.api_client, - &object_id, - &collab_type, - 10, - expected.clone(), - ) - .await - .unwrap(); - - // Add client 2 as the member of the collab with readonly permission. - // client 2 can pull the latest updates via the init sync. But it's not allowed to send local changes. - client_1 - .add_collab_member( - &workspace_id, - &object_id, - &client_2, - AFAccessLevel::ReadOnly, - ) - .await; - client_2 - .open_collab(&workspace_id, &object_id, collab_type.clone()) - .await; - assert_client_collab_include_value(&mut client_2, &object_id, expected) - .await - .unwrap(); -} +// #[tokio::test] +// async fn recv_remote_updates_with_readonly_permission_test() { +// let collab_type = CollabType::Document; +// let mut client_1 = TestClient::new_user().await; +// let mut client_2 = TestClient::new_user().await; +// +// let workspace_id = client_1.workspace_id().await; +// let object_id = client_1 +// .create_and_edit_collab(&workspace_id, collab_type.clone()) +// .await; +// +// // Add client 2 as the member of the collab then the client 2 will receive the update. 
+// client_1 +// .add_collab_member( +// &workspace_id, +// &object_id, +// &client_2, +// AFAccessLevel::ReadOnly, +// ) +// .await; +// +// client_2 +// .open_collab(&workspace_id, &object_id, collab_type.clone()) +// .await; +// +// // Edit the collab from client 1 and then the server will broadcast to client 2 +// client_1 +// .collabs +// .get_mut(&object_id) +// .unwrap() +// .collab +// .lock() +// .insert("name", "AppFlowy"); +// client_1 +// .wait_object_sync_complete(&object_id) +// .await +// .unwrap(); +// +// let expected = json!({ +// "name": "AppFlowy" +// }); +// assert_client_collab_within_secs(&mut client_2, &object_id, "name", expected.clone(), 60).await; +// assert_server_collab( +// &workspace_id, +// &mut client_1.api_client, +// &object_id, +// &collab_type, +// 10, +// expected, +// ) +// .await +// .unwrap(); +// } + +// #[tokio::test] +// async fn init_sync_with_readonly_permission_test() { +// let collab_type = CollabType::Document; +// let mut client_1 = TestClient::new_user().await; +// let mut client_2 = TestClient::new_user().await; +// +// let workspace_id = client_1.workspace_id().await; +// let object_id = client_1 +// .create_and_edit_collab(&workspace_id, collab_type.clone()) +// .await; +// client_1 +// .collabs +// .get_mut(&object_id) +// .unwrap() +// .collab +// .lock() +// .insert("name", "AppFlowy"); +// client_1 +// .wait_object_sync_complete(&object_id) +// .await +// .unwrap(); +// sleep(Duration::from_secs(2)).await; +// +// // +// let expected = json!({ +// "name": "AppFlowy" +// }); +// assert_server_collab( +// &workspace_id, +// &mut client_1.api_client, +// &object_id, +// &collab_type, +// 10, +// expected.clone(), +// ) +// .await +// .unwrap(); +// +// // Add client 2 as the member of the collab with readonly permission. +// // client 2 can pull the latest updates via the init sync. But it's not allowed to send local changes. 
+// client_1 +// .add_collab_member( +// &workspace_id, +// &object_id, +// &client_2, +// AFAccessLevel::ReadOnly, +// ) +// .await; +// client_2 +// .open_collab(&workspace_id, &object_id, collab_type.clone()) +// .await; +// assert_client_collab_include_value(&mut client_2, &object_id, expected) +// .await +// .unwrap(); +// } #[tokio::test] async fn edit_collab_with_readonly_permission_test() { @@ -554,15 +554,14 @@ async fn multiple_user_with_read_only_permission_edit_same_collab_test() { let results = futures::future::join_all(tasks).await; for (index, result) in results.into_iter().enumerate() { let (s, client) = result.unwrap(); - assert_json_eq!( - json!({index.to_string(): s}), - client - .collabs - .get(&object_id) - .unwrap() - .collab - .to_json_value(), - ); + let value = client + .collabs + .get(&object_id) + .unwrap() + .collab + .to_json_value(); + + assert_json_eq!(json!({index.to_string(): s}), value,); } // all the clients should have the same collab object assert_json_eq!( diff --git a/tests/collab/multi_devices_edit.rs b/tests/collab/multi_devices_edit.rs index 5230ad417..90ac51e13 100644 --- a/tests/collab/multi_devices_edit.rs +++ b/tests/collab/multi_devices_edit.rs @@ -1,5 +1,3 @@ -use crate::collab::util::generate_random_string; -use client_api::collab_sync::NUMBER_OF_UPDATE_TRIGGER_INIT_SYNC; use client_api_test_util::*; use collab_entity::CollabType; use database_entity::dto::{AFAccessLevel, QueryCollabParams}; @@ -279,8 +277,7 @@ async fn edit_document_with_both_clients_offline_then_online_sync_test() { } #[tokio::test] -async fn init_sync_when_missing_updates_test() { - let text = generate_random_string(1024); +async fn second_client_missing_broadcast_and_then_pull_missing_updates_test() { let collab_type = CollabType::Document; let mut client_1 = TestClient::new_user().await; let mut client_2 = TestClient::new_user().await; @@ -299,6 +296,16 @@ async fn init_sync_when_missing_updates_test() { ) .await; + // after client 2 finish init sync and then disable receive message + client_2 + .open_collab(&workspace_id, &object_id, collab_type.clone()) + .await; + client_2 + .wait_object_sync_complete(&object_id) + .await + .unwrap(); + client_2.ws_client.disable_receive_message(); + // Client_1 makes the first edit by inserting "task 1". client_1 .collabs @@ -306,109 +313,76 @@ async fn init_sync_when_missing_updates_test() { .unwrap() .collab .lock() - .insert("1", "task 1"); + .insert("content", "hello world"); client_1 .wait_object_sync_complete(&object_id) .await .unwrap(); - // Client_2 opens the collaboration, triggering an initial sync to receive "task 1". - client_2 - .open_collab(&workspace_id, &object_id, collab_type.clone()) - .await; - client_2 - .wait_object_sync_complete(&object_id) - .await - .unwrap(); - - // Validate both clients have "task 1" after the initial sync. - assert_eq!( - client_1.get_edit_collab_json(&object_id).await, - json!({ "1": "task 1" }) - ); - assert_eq!( - client_2.get_edit_collab_json(&object_id).await, - json!({ "1": "task 1" }) - ); + // sleep two seconds to make sure missing the server broadcast message + sleep(Duration::from_secs(2)).await; + // after a period of time, client 2 should trigger init sync + client_2.ws_client.enable_receive_message(); - // Simulate client_2 missing updates by enabling skip_realtime_message. 
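The renamed test above exercises gap detection: client_2 is already in sync, stops receiving broadcasts, and is expected to notice the gap and pull the missing updates once messages flow again. A minimal sketch of the sequence-number contiguity rule this relies on; the "any jump greater than one means missed broadcasts" rule and the names SeqCounter/SyncAction are assumptions for illustration, while the crate's real logic is built around the seq_num the server attaches to each CollabAck:

use std::sync::atomic::{AtomicU32, Ordering};

struct SeqCounter {
    last_seen: AtomicU32,
}

enum SyncAction {
    // The broadcast directly follows the previous one: just apply it.
    Apply,
    // One or more broadcasts were missed: fall back to an init sync.
    InitSync,
}

impl SeqCounter {
    fn observe(&self, incoming: u32) -> SyncAction {
        let prev = self.last_seen.swap(incoming, Ordering::SeqCst);
        if incoming > prev.saturating_add(1) {
            SyncAction::InitSync
        } else {
            SyncAction::Apply
        }
    }
}

fn main() {
    let counter = SeqCounter { last_seen: AtomicU32::new(3) };
    // seq 4 follows 3: contiguous. seq 7 after 4: broadcasts 5 and 6 were missed.
    assert!(matches!(counter.observe(4), SyncAction::Apply));
    assert!(matches!(counter.observe(7), SyncAction::InitSync));
}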
- client_2.ws_client.disable_receive_message(); - client_1 - .wait_object_sync_complete(&object_id) + let expected_json = json!({ + "content": "hello world" + }); + assert_client_collab_include_value(&mut client_2, &object_id, expected_json) .await .unwrap(); +} - // Client_1 inserts "task 2", which client_2 misses due to skipping realtime messages. - for _ in 0..2 * NUMBER_OF_UPDATE_TRIGGER_INIT_SYNC { - client_1 - .collabs - .get_mut(&object_id) - .unwrap() - .collab - .lock() - .insert("2", text.clone()); - } +#[tokio::test] +async fn client_pending_update_test() { + let collab_type = CollabType::Document; + let mut client_1 = TestClient::new_user().await; + let mut client_2 = TestClient::new_user().await; + // Create a collaborative document with client_1 and invite client_2 to collaborate. + let workspace_id = client_1.workspace_id().await; + let object_id = client_1 + .create_and_edit_collab(&workspace_id, collab_type.clone()) + .await; client_1 - .wait_object_sync_complete(&object_id) - .await - .unwrap(); + .add_collab_member( + &workspace_id, + &object_id, + &client_2, + AFAccessLevel::ReadAndWrite, + ) + .await; + // after client 2 finish init sync and then disable receive message client_2 - .collabs - .get_mut(&object_id) - .unwrap() - .collab - .lock() - .insert("3", "task 3"); + .open_collab(&workspace_id, &object_id, collab_type.clone()) + .await; client_2 .wait_object_sync_complete(&object_id) .await .unwrap(); + client_2.ws_client.disable_receive_message(); - // Validate client_1's view includes "task 2", and "task 3", while client_2 missed key2 and key3. - assert_client_collab_include_value( - &mut client_1, - &object_id, - json!({ "1": "task 1", "2": text.clone(), "3": "task 3" }), - ) - .await - .unwrap(); - assert_eq!( - client_2.get_edit_collab_json(&object_id).await, - json!({ "1": "task 1", "3": "task 3" }) - ); - - // client_2 resumes receiving messages - // - // 1. **Client 1 Initiates a Sync**: This action sends a sync message to the server. - // 2. **Server Broadcasts to Client 2**: The server, upon receiving the sync message - // from Client 1, broadcasts a message to Client 2. - // 3. **Sequence Number Check**: The sequence number (seq num) of the broadcast message received - // by Client 2 is checked against the sequence number of the sync message from Client 1. - // 4. **Condition for Init Sync**: If the sequence number of Client 2's broadcast message is - // less than the sequence number of the sync message from Client 1, this condition triggers an - // initialization sync for Client 2. - // - // This ensures that all clients are synchronized and have the latest information, with the initiation sync being triggered based on the comparison of sequence numbers to maintain consistency across the system. - println!("client_2 enable_receive_message"); - client_2.ws_client.enable_receive_message(); + // Client_1 makes the first edit by inserting "task 1". 
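Both new tests flip the test client's receive switch to simulate missed broadcasts. A simplified, hypothetical stand-in for that switch (the real one is ws_client.disable_receive_message / enable_receive_message on the test client): while disabled, incoming realtime messages are simply dropped.

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

#[derive(Clone)]
struct ReceiveGate {
    enabled: Arc<AtomicBool>,
}

impl ReceiveGate {
    fn new() -> Self {
        Self { enabled: Arc::new(AtomicBool::new(true)) }
    }
    fn disable(&self) {
        self.enabled.store(false, Ordering::SeqCst);
    }
    fn enable(&self) {
        self.enabled.store(true, Ordering::SeqCst);
    }
    // Returns the message if receiving is enabled, otherwise drops it.
    fn filter(&self, msg: String) -> Option<String> {
        self.enabled.load(Ordering::SeqCst).then_some(msg)
    }
}

fn main() {
    let gate = ReceiveGate::new();
    gate.disable();
    assert!(gate.filter("broadcast seq=5".to_string()).is_none());
    gate.enable();
    assert!(gate.filter("broadcast seq=6".to_string()).is_some());
}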
client_1 .collabs .get_mut(&object_id) .unwrap() .collab .lock() - .insert("4", "task 4"); + .insert("content", "hello world"); client_1 .wait_object_sync_complete(&object_id) .await .unwrap(); - assert_client_collab_include_value( - &mut client_2, - &object_id, - json!({ "1": "task 1", "2": text.clone(), "3": "task 3", "4": "task 4" }), - ) - .await - .unwrap(); + // sleep two seconds to make sure missing the server broadcast message + sleep(Duration::from_secs(2)).await; + // after a period of time, client 2 should trigger init sync + client_2.ws_client.enable_receive_message(); + + let expected_json = json!({ + "content": "hello world" + }); + assert_client_collab_include_value(&mut client_2, &object_id, expected_json) + .await + .unwrap(); }
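For context on where the pings that drive this recovery come from: the diff adds a PingSync message carrying an origin, object_id, and msg_id. How it is scheduled on the client is not shown in these hunks, so the loop below is only an assumption about the general shape, a periodic, per-object ping sent on an interval; every name in it is illustrative and the origin field is omitted.

use std::time::Duration;
use tokio::time::interval;

type MsgId = u64;

#[derive(Clone, Debug)]
struct PingSync {
    object_id: String,
    msg_id: MsgId,
}

#[tokio::main]
async fn main() {
    let object_id = "object-1".to_string();
    let mut msg_id: MsgId = 0;
    // A production interval would be on the order of seconds; shortened here
    // so the demo finishes quickly.
    let mut ticker = interval(Duration::from_millis(100));
    for _ in 0..3 {
        ticker.tick().await;
        msg_id += 1;
        let ping = PingSync { object_id: object_id.clone(), msg_id };
        // The real client would queue this on its collab sink; here we just print it.
        println!("would send {:?}", ping);
    }
}

Each ping is cheap (no payload) but forces an ack with the server's current seq_num, which is what lets a client that stopped receiving broadcasts, like client_2 in the tests above, detect the gap and fall back to an init sync.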