From f0093974eecf1611bbfcc0c3648cc7eb4e3c97c7 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Thu, 16 Sep 2021 11:06:49 -0700 Subject: [PATCH 01/43] Always store an rpc client on local worktrees Even before sharing, we now want to advertise that the user is working on a certain worktree, to make that visible to all of the worktree's collaborators. --- server/src/rpc.rs | 35 ++++++++++++++----------------- zed/src/editor/buffer.rs | 7 +++++-- zed/src/workspace.rs | 5 +++-- zed/src/worktree.rs | 45 +++++++++++++++++++++++----------------- 4 files changed, 49 insertions(+), 43 deletions(-) diff --git a/server/src/rpc.rs b/server/src/rpc.rs index debd982366c7a..3564b4285df93 100644 --- a/server/src/rpc.rs +++ b/server/src/rpc.rs @@ -1072,9 +1072,10 @@ mod tests { ) .await; let worktree_a = Worktree::open_local( + client_a.clone(), "/a".as_ref(), - lang_registry.clone(), fs, + lang_registry.clone(), &mut cx_a.to_async(), ) .await @@ -1083,9 +1084,7 @@ mod tests { .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) .await; let (worktree_id, worktree_token) = worktree_a - .update(&mut cx_a, |tree, cx| { - tree.as_local_mut().unwrap().share(client_a.clone(), cx) - }) + .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx)) .await .unwrap(); @@ -1181,9 +1180,10 @@ mod tests { .await; let worktree_a = Worktree::open_local( + client_a.clone(), "/a".as_ref(), - lang_registry.clone(), fs.clone(), + lang_registry.clone(), &mut cx_a.to_async(), ) .await @@ -1192,9 +1192,7 @@ mod tests { .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) .await; let (worktree_id, worktree_token) = worktree_a - .update(&mut cx_a, |tree, cx| { - tree.as_local_mut().unwrap().share(client_a.clone(), cx) - }) + .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx)) .await .unwrap(); @@ -1314,9 +1312,10 @@ mod tests { .await .unwrap(); let worktree_a = Worktree::open_local( + client_a.clone(), "/".as_ref(), - lang_registry.clone(), 
fs, + lang_registry.clone(), &mut cx_a.to_async(), ) .await @@ -1325,9 +1324,7 @@ mod tests { .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) .await; let (worktree_id, worktree_token) = worktree_a - .update(&mut cx_a, |tree, cx| { - tree.as_local_mut().unwrap().share(client_a.clone(), cx) - }) + .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx)) .await .unwrap(); @@ -1395,9 +1392,10 @@ mod tests { .await .unwrap(); let worktree_a = Worktree::open_local( + client_a.clone(), "/".as_ref(), - lang_registry.clone(), fs, + lang_registry.clone(), &mut cx_a.to_async(), ) .await @@ -1406,9 +1404,7 @@ mod tests { .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) .await; let (worktree_id, worktree_token) = worktree_a - .update(&mut cx_a, |tree, cx| { - tree.as_local_mut().unwrap().share(client_a.clone(), cx) - }) + .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx)) .await .unwrap(); @@ -1460,9 +1456,10 @@ mod tests { ) .await; let worktree_a = Worktree::open_local( + client_a.clone(), "/a".as_ref(), - lang_registry.clone(), fs, + lang_registry.clone(), &mut cx_a.to_async(), ) .await @@ -1471,9 +1468,7 @@ mod tests { .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) .await; let (worktree_id, worktree_token) = worktree_a - .update(&mut cx_a, |tree, cx| { - tree.as_local_mut().unwrap().share(client_a.clone(), cx) - }) + .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx)) .await .unwrap(); diff --git a/zed/src/editor/buffer.rs b/zed/src/editor/buffer.rs index 97e0202cec4a6..8e82e80fcdc40 100644 --- a/zed/src/editor/buffer.rs +++ b/zed/src/editor/buffer.rs @@ -2913,6 +2913,7 @@ mod tests { use crate::{ fs::RealFs, language::LanguageRegistry, + rpc, test::temp_tree, util::RandomCharIter, worktree::{Worktree, WorktreeHandle as _}, @@ -3379,9 +3380,10 @@ mod tests { "file3": "ghi", })); let tree = Worktree::open_local( + rpc::Client::new(), dir.path(), - 
Default::default(), Arc::new(RealFs), + Default::default(), &mut cx.to_async(), ) .await @@ -3501,9 +3503,10 @@ mod tests { let initial_contents = "aaa\nbbbbb\nc\n"; let dir = temp_tree(json!({ "the-file": initial_contents })); let tree = Worktree::open_local( + rpc::Client::new(), dir.path(), - Default::default(), Arc::new(RealFs), + Default::default(), &mut cx.to_async(), ) .await diff --git a/zed/src/workspace.rs b/zed/src/workspace.rs index ff3666e0de077..8582745ffe476 100644 --- a/zed/src/workspace.rs +++ b/zed/src/workspace.rs @@ -546,10 +546,11 @@ impl Workspace { cx: &mut ViewContext, ) -> Task>> { let languages = self.languages.clone(); + let rpc = self.rpc.clone(); let fs = self.fs.clone(); let path = Arc::from(path); cx.spawn(|this, mut cx| async move { - let worktree = Worktree::open_local(path, languages, fs, &mut cx).await?; + let worktree = Worktree::open_local(rpc, path, fs, languages, &mut cx).await?; this.update(&mut cx, |this, cx| { cx.observe(&worktree, |_, _, cx| cx.notify()).detach(); this.worktrees.insert(worktree.clone()); @@ -826,7 +827,7 @@ impl Workspace { let worktree = this.worktrees.iter().next()?; worktree.update(cx, |worktree, cx| { let worktree = worktree.as_local_mut()?; - Some(worktree.share(rpc, cx)) + Some(worktree.share(cx)) }) }); diff --git a/zed/src/worktree.rs b/zed/src/worktree.rs index 7b2fea91756c4..a6765a2d26afe 100644 --- a/zed/src/worktree.rs +++ b/zed/src/worktree.rs @@ -69,7 +69,7 @@ impl Entity for Worktree { Self::Local(tree) => tree .share .as_ref() - .map(|share| (share.rpc.clone(), share.remote_id)), + .map(|share| (tree.rpc.clone(), share.remote_id)), Self::Remote(tree) => Some((tree.rpc.clone(), tree.remote_id)), }; @@ -86,12 +86,14 @@ impl Entity for Worktree { impl Worktree { pub async fn open_local( + rpc: Arc, path: impl Into>, - languages: Arc, fs: Arc, + languages: Arc, cx: &mut AsyncAppContext, ) -> Result> { - let (tree, scan_states_tx) = LocalWorktree::new(path, languages, fs.clone(), cx).await?; + 
let (tree, scan_states_tx) = + LocalWorktree::new(rpc, path, fs.clone(), languages, cx).await?; tree.update(cx, |tree, cx| { let tree = tree.as_local_mut().unwrap(); let abs_path = tree.snapshot.abs_path.clone(); @@ -658,14 +660,16 @@ pub struct LocalWorktree { peers: HashMap, languages: Arc, queued_operations: Vec<(u64, Operation)>, + rpc: Arc, fs: Arc, } impl LocalWorktree { async fn new( + rpc: Arc, path: impl Into>, - languages: Arc, fs: Arc, + languages: Arc, cx: &mut AsyncAppContext, ) -> Result<(ModelHandle, Sender)> { let abs_path = path.into(); @@ -716,6 +720,7 @@ impl LocalWorktree { queued_operations: Default::default(), peers: Default::default(), languages, + rpc, fs, }; @@ -976,11 +981,11 @@ impl LocalWorktree { pub fn share( &mut self, - rpc: Arc, cx: &mut ModelContext, ) -> Task> { let snapshot = self.snapshot(); let share_request = self.share_request(cx); + let rpc = self.rpc.clone(); cx.spawn(|this, mut cx| async move { let share_request = share_request.await; let share_response = rpc.request(share_request).await?; @@ -1018,7 +1023,6 @@ impl LocalWorktree { let worktree = worktree.as_local_mut().unwrap(); worktree.share = Some(ShareState { - rpc, remote_id: share_response.worktree_id, snapshots_tx: snapshots_to_send_tx, _subscriptions, @@ -1078,7 +1082,6 @@ impl fmt::Debug for LocalWorktree { } struct ShareState { - rpc: Arc, remote_id: u64, snapshots_tx: Sender, _subscriptions: Vec, @@ -1551,7 +1554,7 @@ impl File { Worktree::Local(worktree) => worktree .share .as_ref() - .map(|share| (share.rpc.clone(), share.remote_id)), + .map(|share| (worktree.rpc.clone(), share.remote_id)), Worktree::Remote(worktree) => Some((worktree.rpc.clone(), worktree.remote_id)), } { cx.spawn(|worktree, mut cx| async move { @@ -1635,14 +1638,12 @@ impl File { ) -> Task> { self.worktree.update(cx, |worktree, cx| match worktree { Worktree::Local(worktree) => { - let rpc = worktree - .share - .as_ref() - .map(|share| (share.rpc.clone(), share.remote_id)); + let rpc = 
worktree.rpc.clone(); + let worktree_id = worktree.share.as_ref().map(|share| share.remote_id); let save = worktree.save(self.path.clone(), text, cx); cx.background().spawn(async move { let entry = save.await?; - if let Some((rpc, worktree_id)) = rpc { + if let Some(worktree_id) = worktree_id { rpc.send(proto::BufferSaved { worktree_id, buffer_id, @@ -2561,9 +2562,10 @@ mod tests { .unwrap(); let tree = Worktree::open_local( + rpc::Client::new(), root_link_path, - Default::default(), Arc::new(RealFs), + Default::default(), &mut cx.to_async(), ) .await @@ -2617,9 +2619,10 @@ mod tests { } })); let tree = Worktree::open_local( + rpc::Client::new(), dir.path(), - Default::default(), Arc::new(RealFs), + Default::default(), &mut cx.to_async(), ) .await @@ -2661,9 +2664,10 @@ mod tests { "file1": "the old contents", })); let tree = Worktree::open_local( + rpc::Client::new(), dir.path(), - Arc::new(LanguageRegistry::new()), Arc::new(RealFs), + Default::default(), &mut cx.to_async(), ) .await @@ -2690,9 +2694,10 @@ mod tests { let file_path = dir.path().join("file1"); let tree = Worktree::open_local( + rpc::Client::new(), file_path.clone(), - Arc::new(LanguageRegistry::new()), Arc::new(RealFs), + Default::default(), &mut cx.to_async(), ) .await @@ -2732,9 +2737,10 @@ mod tests { })); let tree = Worktree::open_local( + rpc::Client::new(), dir.path(), - Default::default(), Arc::new(RealFs), + Default::default(), &mut cx.to_async(), ) .await @@ -2886,9 +2892,10 @@ mod tests { })); let tree = Worktree::open_local( + rpc::Client::new(), dir.path(), - Default::default(), Arc::new(RealFs), + Default::default(), &mut cx.to_async(), ) .await From 176ca9086ed20431805337f4ba66e98168af052b Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Thu, 16 Sep 2021 11:25:31 -0700 Subject: [PATCH 02/43] Rename OpenWorktree message to JoinWorktree We'll repurpose the name 'OpenWorktree' for opening a local worktree while signed in, and advertising the open status to the worktree's collaborators. 
--- server/src/rpc.rs | 6 +++--- zed/src/worktree.rs | 14 +++++++------- zrpc/proto/zed.proto | 8 ++++---- zrpc/src/proto.rs | 8 ++++---- 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/server/src/rpc.rs b/server/src/rpc.rs index 3564b4285df93..ac04e5ee24f07 100644 --- a/server/src/rpc.rs +++ b/server/src/rpc.rs @@ -293,7 +293,7 @@ impl Server { async fn join_worktree( self: Arc, - request: TypedEnvelope, + request: TypedEnvelope, ) -> tide::Result<()> { let worktree_id = request.payload.worktree_id; let access_token = &request.payload.access_token; @@ -334,7 +334,7 @@ impl Server { self.peer .respond( request.receipt(), - proto::OpenWorktreeResponse { + proto::JoinWorktreeResponse { worktree_id, worktree: Some(proto::Worktree { root_name: worktree.root_name.clone(), @@ -349,7 +349,7 @@ impl Server { self.peer .respond( request.receipt(), - proto::OpenWorktreeResponse { + proto::JoinWorktreeResponse { worktree_id, worktree: None, replica_id: 0, diff --git a/zed/src/worktree.rs b/zed/src/worktree.rs index a6765a2d26afe..79f76a97089e1 100644 --- a/zed/src/worktree.rs +++ b/zed/src/worktree.rs @@ -117,7 +117,7 @@ impl Worktree { cx: &mut AsyncAppContext, ) -> Result> { let response = rpc - .request(proto::OpenWorktree { + .request(proto::JoinWorktree { worktree_id: id, access_token, }) @@ -127,18 +127,18 @@ impl Worktree { } async fn remote( - open_response: proto::OpenWorktreeResponse, + join_response: proto::JoinWorktreeResponse, rpc: Arc, languages: Arc, cx: &mut AsyncAppContext, ) -> Result> { - let worktree = open_response + let worktree = join_response .worktree .ok_or_else(|| anyhow!("empty worktree"))?; - let remote_id = open_response.worktree_id; - let replica_id = open_response.replica_id as ReplicaId; - let peers = open_response.peers; + let remote_id = join_response.worktree_id; + let replica_id = join_response.replica_id as ReplicaId; + let peers = join_response.peers; let root_char_bag: CharBag = worktree .root_name .chars() @@ -2780,7 
+2780,7 @@ mod tests { }) .await; let remote = Worktree::remote( - proto::OpenWorktreeResponse { + proto::JoinWorktreeResponse { worktree_id, worktree: share_request.worktree, replica_id: 1, diff --git a/zrpc/proto/zed.proto b/zrpc/proto/zed.proto index 4e42441eb276a..e6b400bef98ba 100644 --- a/zrpc/proto/zed.proto +++ b/zrpc/proto/zed.proto @@ -11,8 +11,8 @@ message Envelope { Ping ping = 6; ShareWorktree share_worktree = 7; ShareWorktreeResponse share_worktree_response = 8; - OpenWorktree open_worktree = 9; - OpenWorktreeResponse open_worktree_response = 10; + JoinWorktree join_worktree = 9; + JoinWorktreeResponse join_worktree_response = 10; UpdateWorktree update_worktree = 11; CloseWorktree close_worktree = 12; OpenBuffer open_buffer = 13; @@ -57,12 +57,12 @@ message ShareWorktreeResponse { string access_token = 2; } -message OpenWorktree { +message JoinWorktree { uint64 worktree_id = 1; string access_token = 2; } -message OpenWorktreeResponse { +message JoinWorktreeResponse { uint64 worktree_id = 1; Worktree worktree = 2; uint32 replica_id = 3; diff --git a/zrpc/src/proto.rs b/zrpc/src/proto.rs index b2d4de3bbf501..b00bca89c2404 100644 --- a/zrpc/src/proto.rs +++ b/zrpc/src/proto.rs @@ -138,8 +138,8 @@ messages!( LeaveChannel, OpenBuffer, OpenBufferResponse, - OpenWorktree, - OpenWorktreeResponse, + JoinWorktree, + JoinWorktreeResponse, Ping, RemovePeer, SaveBuffer, @@ -156,7 +156,7 @@ request_messages!( (GetUsers, GetUsersResponse), (JoinChannel, JoinChannelResponse), (OpenBuffer, OpenBufferResponse), - (OpenWorktree, OpenWorktreeResponse), + (JoinWorktree, JoinWorktreeResponse), (Ping, Ack), (SaveBuffer, BufferSaved), (UpdateBuffer, Ack), @@ -172,7 +172,7 @@ entity_messages!( CloseBuffer, CloseWorktree, OpenBuffer, - OpenWorktree, + JoinWorktree, RemovePeer, SaveBuffer, UpdateBuffer, From e3c0d6980cf5758aa1ccefe6280cf3772860dadc Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Thu, 16 Sep 2021 18:39:29 -0700 Subject: [PATCH 03/43] Switch to a new flow for 
advertising, sharing and joining worktrees Now, when you open a local worktree, we immediately send an `OpenWorktree` message to the server, telling it the name of the folder that you've opened, and the names of all the collaborators (based on a `.zed.toml` file). The server responds with a unique id for the worktree. When starting to share this local worktree, you now include this previously-assigned id in the `ShareWorktree` message. When joining a worktree, there is no longer a need to provide an access token. The access is controlled by the set of "collaborator logins" that were provided when the worktree was initially opened by the host. --- server/src/rpc.rs | 498 ++++++++++++++++++++++++++++--------------- zed/src/workspace.rs | 90 +++----- zed/src/worktree.rs | 208 ++++++++++++++---- zrpc/proto/zed.proto | 24 ++- zrpc/src/proto.rs | 10 +- 5 files changed, 547 insertions(+), 283 deletions(-) diff --git a/server/src/rpc.rs b/server/src/rpc.rs index ac04e5ee24f07..bbc0b090ca6af 100644 --- a/server/src/rpc.rs +++ b/server/src/rpc.rs @@ -1,6 +1,6 @@ use super::{ auth, - db::{ChannelId, MessageId, UserId}, + db::{ChannelId, MessageId, User, UserId}, AppState, }; use anyhow::anyhow; @@ -25,7 +25,6 @@ use tide::{ }; use time::OffsetDateTime; use zrpc::{ - auth::random_token, proto::{self, AnyTypedEnvelope, EnvelopedMessage}, Connection, ConnectionId, Peer, TypedEnvelope, }; @@ -50,6 +49,7 @@ pub struct Server { struct ServerState { connections: HashMap, pub worktrees: HashMap, + visible_worktrees_by_github_login: HashMap>, channels: HashMap, next_worktree_id: u64, } @@ -61,11 +61,15 @@ struct ConnectionState { } struct Worktree { - host_connection_id: Option, + host_connection_id: ConnectionId, + collaborator_github_logins: Vec, + root_name: String, + share: Option, +} + +struct WorktreeShare { guest_connection_ids: HashMap, active_replica_ids: HashSet, - access_token: String, - root_name: String, entries: HashMap, } @@ -93,10 +97,12 @@ impl Server { server 
.add_handler(Server::ping) + .add_handler(Server::open_worktree) + .add_handler(Server::close_worktree) .add_handler(Server::share_worktree) + .add_handler(Server::unshare_worktree) .add_handler(Server::join_worktree) .add_handler(Server::update_worktree) - .add_handler(Server::close_worktree) .add_handler(Server::open_buffer) .add_handler(Server::close_buffer) .add_handler(Server::update_buffer) @@ -231,13 +237,15 @@ impl Server { } for worktree_id in connection.worktrees { if let Some(worktree) = state.worktrees.get_mut(&worktree_id) { - if worktree.host_connection_id == Some(connection_id) { - worktree_ids.push(worktree_id); - } else if let Some(replica_id) = - worktree.guest_connection_ids.remove(&connection_id) - { - worktree.active_replica_ids.remove(&replica_id); + if worktree.host_connection_id == connection_id { worktree_ids.push(worktree_id); + } else if let Some(share_state) = worktree.share.as_mut() { + if let Some(replica_id) = + share_state.guest_connection_ids.remove(&connection_id) + { + share_state.active_replica_ids.remove(&replica_id); + worktree_ids.push(worktree_id); + } } } } @@ -250,14 +258,30 @@ impl Server { Ok(()) } + async fn open_worktree( + self: Arc, + request: TypedEnvelope, + ) -> tide::Result<()> { + let receipt = request.receipt(); + + let mut state = self.state.write().await; + let worktree_id = state.add_worktree(Worktree { + host_connection_id: request.sender_id, + collaborator_github_logins: request.payload.collaborator_logins, + root_name: request.payload.root_name, + share: None, + }); + + self.peer + .respond(receipt, proto::OpenWorktreeResponse { worktree_id }) + .await?; + Ok(()) + } + async fn share_worktree( self: Arc, mut request: TypedEnvelope, ) -> tide::Result<()> { - let mut state = self.state.write().await; - let worktree_id = state.next_worktree_id; - state.next_worktree_id += 1; - let access_token = random_token(); let worktree = request .payload .worktree @@ -267,27 +291,58 @@ impl Server { .into_iter() 
.map(|entry| (entry.id, entry)) .collect(); - state.worktrees.insert( - worktree_id, - Worktree { - host_connection_id: Some(request.sender_id), + let mut state = self.state.write().await; + if let Some(worktree) = state.worktrees.get_mut(&worktree.id) { + worktree.share = Some(WorktreeShare { guest_connection_ids: Default::default(), active_replica_ids: Default::default(), - access_token: access_token.clone(), - root_name: mem::take(&mut worktree.root_name), entries, - }, - ); + }); + self.peer + .respond(request.receipt(), proto::ShareWorktreeResponse {}) + .await?; + } else { + self.peer + .respond_with_error( + request.receipt(), + proto::Error { + message: "no such worktree".to_string(), + }, + ) + .await?; + } + Ok(()) + } + + async fn unshare_worktree( + self: Arc, + request: TypedEnvelope, + ) -> tide::Result<()> { + let worktree_id = request.payload.worktree_id; + + let connection_ids; + { + let mut state = self.state.write().await; + let worktree = state.write_worktree(worktree_id, request.sender_id)?; + if worktree.host_connection_id != request.sender_id { + return Err(anyhow!("no such worktree"))?; + } + + connection_ids = worktree.connection_ids(); + worktree.share.take(); + for connection_id in &connection_ids { + if let Some(connection) = state.connections.get_mut(connection_id) { + connection.worktrees.remove(&worktree_id); + } + } + } + + broadcast(request.sender_id, connection_ids, |conn_id| { + self.peer + .send(conn_id, proto::UnshareWorktree { worktree_id }) + }) + .await?; - self.peer - .respond( - request.receipt(), - proto::ShareWorktreeResponse { - worktree_id, - access_token, - }, - ) - .await?; Ok(()) } @@ -296,67 +351,112 @@ impl Server { request: TypedEnvelope, ) -> tide::Result<()> { let worktree_id = request.payload.worktree_id; - let access_token = &request.payload.access_token; + let user = self.user_for_connection(request.sender_id).await?; + let response; + let connection_ids; let mut state = self.state.write().await; - if let 
Some((peer_replica_id, worktree)) = - state.join_worktree(request.sender_id, worktree_id, access_token) - { - let mut peers = Vec::new(); - if let Some(host_connection_id) = worktree.host_connection_id { + match state.join_worktree(request.sender_id, &user, worktree_id) { + Ok((peer_replica_id, worktree)) => { + let share = worktree.share()?; + let peer_count = share.guest_connection_ids.len(); + let mut peers = Vec::with_capacity(peer_count); peers.push(proto::Peer { - peer_id: host_connection_id.0, + peer_id: worktree.host_connection_id.0, replica_id: 0, }); + for (peer_conn_id, peer_replica_id) in &share.guest_connection_ids { + if *peer_conn_id != request.sender_id { + peers.push(proto::Peer { + peer_id: peer_conn_id.0, + replica_id: *peer_replica_id as u32, + }); + } + } + connection_ids = worktree.connection_ids(); + response = proto::JoinWorktreeResponse { + worktree: Some(proto::Worktree { + id: worktree_id, + root_name: worktree.root_name.clone(), + entries: share.entries.values().cloned().collect(), + }), + replica_id: peer_replica_id as u32, + peers, + }; } - for (peer_conn_id, peer_replica_id) in &worktree.guest_connection_ids { - if *peer_conn_id != request.sender_id { - peers.push(proto::Peer { - peer_id: peer_conn_id.0, - replica_id: *peer_replica_id as u32, - }); + Err(error) => { + self.peer + .respond_with_error( + request.receipt(), + proto::Error { + message: error.to_string(), + }, + ) + .await?; + return Ok(()); + } + } + + broadcast(request.sender_id, connection_ids, |conn_id| { + self.peer.send( + conn_id, + proto::AddPeer { + worktree_id, + peer: Some(proto::Peer { + peer_id: request.sender_id.0, + replica_id: response.replica_id, + }), + }, + ) + }) + .await?; + self.peer.respond(request.receipt(), response).await?; + + Ok(()) + } + + async fn close_worktree( + self: Arc, + request: TypedEnvelope, + ) -> tide::Result<()> { + let worktree_id = request.payload.worktree_id; + let connection_ids; + let mut is_host = false; + let mut is_guest = 
false; + { + let mut state = self.state.write().await; + let worktree = state.write_worktree(worktree_id, request.sender_id)?; + connection_ids = worktree.connection_ids(); + + if worktree.host_connection_id == request.sender_id { + is_host = true; + state.remove_worktree(worktree_id); + } else { + let share = worktree.share_mut()?; + if let Some(replica_id) = share.guest_connection_ids.remove(&request.sender_id) { + is_guest = true; + share.active_replica_ids.remove(&replica_id); } } + } - broadcast(request.sender_id, worktree.connection_ids(), |conn_id| { + if is_host { + broadcast(request.sender_id, connection_ids, |conn_id| { + self.peer + .send(conn_id, proto::UnshareWorktree { worktree_id }) + }) + .await?; + } else if is_guest { + broadcast(request.sender_id, connection_ids, |conn_id| { self.peer.send( conn_id, - proto::AddPeer { + proto::RemovePeer { worktree_id, - peer: Some(proto::Peer { - peer_id: request.sender_id.0, - replica_id: peer_replica_id as u32, - }), + peer_id: request.sender_id.0, }, ) }) - .await?; - self.peer - .respond( - request.receipt(), - proto::JoinWorktreeResponse { - worktree_id, - worktree: Some(proto::Worktree { - root_name: worktree.root_name.clone(), - entries: worktree.entries.values().cloned().collect(), - }), - replica_id: peer_replica_id as u32, - peers, - }, - ) - .await?; - } else { - self.peer - .respond( - request.receipt(), - proto::JoinWorktreeResponse { - worktree_id, - worktree: None, - replica_id: 0, - peers: Vec::new(), - }, - ) - .await?; + .await? 
} Ok(()) @@ -369,12 +469,14 @@ impl Server { { let mut state = self.state.write().await; let worktree = state.write_worktree(request.payload.worktree_id, request.sender_id)?; + let share = worktree.share_mut()?; + for entry_id in &request.payload.removed_entries { - worktree.entries.remove(&entry_id); + share.entries.remove(&entry_id); } for entry in &request.payload.updated_entries { - worktree.entries.insert(entry.id, entry.clone()); + share.entries.insert(entry.id, entry.clone()); } } @@ -383,38 +485,6 @@ impl Server { Ok(()) } - async fn close_worktree( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result<()> { - let connection_ids; - { - let mut state = self.state.write().await; - let worktree = state.write_worktree(request.payload.worktree_id, request.sender_id)?; - connection_ids = worktree.connection_ids(); - if worktree.host_connection_id == Some(request.sender_id) { - worktree.host_connection_id = None; - } else if let Some(replica_id) = - worktree.guest_connection_ids.remove(&request.sender_id) - { - worktree.active_replica_ids.remove(&replica_id); - } - } - - broadcast(request.sender_id, connection_ids, |conn_id| { - self.peer.send( - conn_id, - proto::RemovePeer { - worktree_id: request.payload.worktree_id, - peer_id: request.sender_id.0, - }, - ) - }) - .await?; - - Ok(()) - } - async fn open_buffer( self: Arc, request: TypedEnvelope, @@ -426,7 +496,7 @@ impl Server { .read() .await .read_worktree(worktree_id, request.sender_id)? - .host_connection_id()?; + .host_connection_id; let response = self .peer @@ -445,7 +515,7 @@ impl Server { .read() .await .read_worktree(request.payload.worktree_id, request.sender_id)? 
- .host_connection_id()?; + .host_connection_id; self.peer .forward_send(request.sender_id, host_connection_id, request.payload) @@ -463,8 +533,9 @@ impl Server { { let state = self.state.read().await; let worktree = state.read_worktree(request.payload.worktree_id, request.sender_id)?; - host = worktree.host_connection_id()?; + host = worktree.host_connection_id; guests = worktree + .share()? .guest_connection_ids .keys() .copied() @@ -785,6 +856,24 @@ impl Server { Ok(()) } + async fn user_for_connection(&self, connection_id: ConnectionId) -> tide::Result { + let user_id = self + .state + .read() + .await + .connections + .get(&connection_id) + .ok_or_else(|| anyhow!("no such connection"))? + .user_id; + Ok(self + .app_state + .db + .get_users_by_ids(user_id, Some(user_id).into_iter()) + .await? + .pop() + .ok_or_else(|| anyhow!("no such user"))?) + } + async fn broadcast_in_worktree( &self, worktree_id: u64, @@ -860,30 +949,34 @@ impl ServerState { fn join_worktree( &mut self, connection_id: ConnectionId, + user: &User, worktree_id: u64, - access_token: &str, - ) -> Option<(ReplicaId, &Worktree)> { - if let Some(worktree) = self.worktrees.get_mut(&worktree_id) { - if access_token == worktree.access_token { - if let Some(connection) = self.connections.get_mut(&connection_id) { - connection.worktrees.insert(worktree_id); - } + ) -> tide::Result<(ReplicaId, &Worktree)> { + let connection = self + .connections + .get_mut(&connection_id) + .ok_or_else(|| anyhow!("no such connection"))?; + let worktree = self + .worktrees + .get_mut(&worktree_id) + .ok_or_else(|| anyhow!("no such worktree"))?; + if !worktree + .collaborator_github_logins + .contains(&user.github_login) + { + Err(anyhow!("no such worktree"))?; + } - let mut replica_id = 1; - while worktree.active_replica_ids.contains(&replica_id) { - replica_id += 1; - } - worktree.active_replica_ids.insert(replica_id); - worktree - .guest_connection_ids - .insert(connection_id, replica_id); - Some((replica_id, 
worktree)) - } else { - None - } - } else { - None + let share = worktree.share_mut()?; + connection.worktrees.insert(worktree_id); + + let mut replica_id = 1; + while share.active_replica_ids.contains(&replica_id) { + replica_id += 1; } + share.active_replica_ids.insert(replica_id); + share.guest_connection_ids.insert(connection_id, replica_id); + return Ok((replica_id, worktree)); } fn read_worktree( @@ -896,8 +989,11 @@ impl ServerState { .get(&worktree_id) .ok_or_else(|| anyhow!("worktree not found"))?; - if worktree.host_connection_id == Some(connection_id) - || worktree.guest_connection_ids.contains_key(&connection_id) + if worktree.host_connection_id == connection_id + || worktree + .share()? + .guest_connection_ids + .contains_key(&connection_id) { Ok(worktree) } else { @@ -919,8 +1015,10 @@ impl ServerState { .get_mut(&worktree_id) .ok_or_else(|| anyhow!("worktree not found"))?; - if worktree.host_connection_id == Some(connection_id) - || worktree.guest_connection_ids.contains_key(&connection_id) + if worktree.host_connection_id == connection_id + || worktree.share.as_ref().map_or(false, |share| { + share.guest_connection_ids.contains_key(&connection_id) + }) { Ok(worktree) } else { @@ -931,21 +1029,69 @@ impl ServerState { ))? 
} } + + fn add_worktree(&mut self, worktree: Worktree) -> u64 { + let worktree_id = self.next_worktree_id; + for collaborator_login in &worktree.collaborator_github_logins { + self.visible_worktrees_by_github_login + .entry(collaborator_login.clone()) + .or_default() + .insert(worktree_id); + } + self.next_worktree_id += 1; + self.worktrees.insert(worktree_id, worktree); + worktree_id + } + + fn remove_worktree(&mut self, worktree_id: u64) { + let worktree = self.worktrees.remove(&worktree_id).unwrap(); + if let Some(connection) = self.connections.get_mut(&worktree.host_connection_id) { + connection.worktrees.remove(&worktree_id); + } + if let Some(share) = worktree.share { + for connection_id in share.guest_connection_ids.keys() { + if let Some(connection) = self.connections.get_mut(connection_id) { + connection.worktrees.remove(&worktree_id); + } + } + } + for collaborator_login in worktree.collaborator_github_logins { + if let Some(visible_worktrees) = self + .visible_worktrees_by_github_login + .get_mut(&collaborator_login) + { + visible_worktrees.remove(&worktree_id); + } + } + } } impl Worktree { pub fn connection_ids(&self) -> Vec { - self.guest_connection_ids - .keys() - .copied() - .chain(self.host_connection_id) - .collect() + if let Some(share) = &self.share { + share + .guest_connection_ids + .keys() + .copied() + .chain(Some(self.host_connection_id)) + .collect() + } else { + vec![self.host_connection_id] + } } - fn host_connection_id(&self) -> tide::Result { + fn share(&self) -> tide::Result<&WorktreeShare> { Ok(self - .host_connection_id - .ok_or_else(|| anyhow!("host disconnected from worktree"))?) + .share + .as_ref() + .ok_or_else(|| anyhow!("worktree is not shared"))?) + } + + fn share_mut(&mut self) -> tide::Result<&mut WorktreeShare> { + Ok(self + .share + .as_mut() + .ok_or_else(|| anyhow!("worktree is not shared"))?) 
} } @@ -1066,6 +1212,7 @@ mod tests { fs.insert_tree( "/a", json!({ + ".zed.toml": r#"collaborators = ["user_b"]"#, "a.txt": "a-contents", "b.txt": "b-contents", }), @@ -1083,7 +1230,7 @@ mod tests { worktree_a .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) .await; - let (worktree_id, worktree_token) = worktree_a + let worktree_id = worktree_a .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx)) .await .unwrap(); @@ -1092,7 +1239,6 @@ mod tests { let worktree_b = Worktree::open_remote( client_b.clone(), worktree_id, - worktree_token, lang_registry.clone(), &mut cx_b.to_async(), ) @@ -1173,6 +1319,7 @@ mod tests { fs.insert_tree( "/a", json!({ + ".zed.toml": r#"collaborators = ["user_b", "user_c"]"#, "file1": "", "file2": "" }), @@ -1191,7 +1338,7 @@ mod tests { worktree_a .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) .await; - let (worktree_id, worktree_token) = worktree_a + let worktree_id = worktree_a .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx)) .await .unwrap(); @@ -1200,7 +1347,6 @@ mod tests { let worktree_b = Worktree::open_remote( client_b.clone(), worktree_id, - worktree_token.clone(), lang_registry.clone(), &mut cx_b.to_async(), ) @@ -1209,7 +1355,6 @@ mod tests { let worktree_c = Worktree::open_remote( client_c.clone(), worktree_id, - worktree_token, lang_registry.clone(), &mut cx_c.to_async(), ) @@ -1273,17 +1418,17 @@ mod tests { .unwrap(); worktree_b - .condition(&cx_b, |tree, _| tree.file_count() == 3) + .condition(&cx_b, |tree, _| tree.file_count() == 4) .await; worktree_c - .condition(&cx_c, |tree, _| tree.file_count() == 3) + .condition(&cx_c, |tree, _| tree.file_count() == 4) .await; worktree_b.read_with(&cx_b, |tree, _| { assert_eq!( tree.paths() .map(|p| p.to_string_lossy()) .collect::>(), - &["file1", "file3", "file4"] + &[".zed.toml", "file1", "file3", "file4"] ) }); worktree_c.read_with(&cx_c, |tree, _| { @@ -1291,7 +1436,7 @@ mod tests { tree.paths() 
.map(|p| p.to_string_lossy()) .collect::>(), - &["file1", "file3", "file4"] + &[".zed.toml", "file1", "file3", "file4"] ) }); } @@ -1308,12 +1453,18 @@ mod tests { // Share a local worktree as client A let fs = Arc::new(FakeFs::new()); - fs.save(Path::new("/a.txt"), &"a-contents".into()) - .await - .unwrap(); + fs.insert_tree( + "/dir", + json!({ + ".zed.toml": r#"collaborators = ["user_b", "user_c"]"#, + "a.txt": "a-contents", + }), + ) + .await; + let worktree_a = Worktree::open_local( client_a.clone(), - "/".as_ref(), + "/dir".as_ref(), fs, lang_registry.clone(), &mut cx_a.to_async(), @@ -1323,7 +1474,7 @@ mod tests { worktree_a .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) .await; - let (worktree_id, worktree_token) = worktree_a + let worktree_id = worktree_a .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx)) .await .unwrap(); @@ -1332,7 +1483,6 @@ mod tests { let worktree_b = Worktree::open_remote( client_b.clone(), worktree_id, - worktree_token, lang_registry.clone(), &mut cx_b.to_async(), ) @@ -1388,12 +1538,17 @@ mod tests { // Share a local worktree as client A let fs = Arc::new(FakeFs::new()); - fs.save(Path::new("/a.txt"), &"a-contents".into()) - .await - .unwrap(); + fs.insert_tree( + "/dir", + json!({ + ".zed.toml": r#"collaborators = ["user_b"]"#, + "a.txt": "a-contents", + }), + ) + .await; let worktree_a = Worktree::open_local( client_a.clone(), - "/".as_ref(), + "/dir".as_ref(), fs, lang_registry.clone(), &mut cx_a.to_async(), @@ -1403,7 +1558,7 @@ mod tests { worktree_a .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) .await; - let (worktree_id, worktree_token) = worktree_a + let worktree_id = worktree_a .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx)) .await .unwrap(); @@ -1412,7 +1567,6 @@ mod tests { let worktree_b = Worktree::open_remote( client_b.clone(), worktree_id, - worktree_token, lang_registry.clone(), &mut cx_b.to_async(), ) @@ -1450,6 +1604,7 @@ mod 
tests { fs.insert_tree( "/a", json!({ + ".zed.toml": r#"collaborators = ["user_b"]"#, "a.txt": "a-contents", "b.txt": "b-contents", }), @@ -1467,7 +1622,7 @@ mod tests { worktree_a .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) .await; - let (worktree_id, worktree_token) = worktree_a + let worktree_id = worktree_a .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx)) .await .unwrap(); @@ -1476,7 +1631,6 @@ mod tests { let _worktree_b = Worktree::open_remote( client_b.clone(), worktree_id, - worktree_token, lang_registry.clone(), &mut cx_b.to_async(), ) diff --git a/zed/src/workspace.rs b/zed/src/workspace.rs index 8582745ffe476..bc918bb30fd5c 100644 --- a/zed/src/workspace.rs +++ b/zed/src/workspace.rs @@ -11,10 +11,11 @@ use crate::{ rpc, settings::Settings, user, + util::TryFutureExt as _, worktree::{File, Worktree}, AppState, Authenticate, }; -use anyhow::{anyhow, Result}; +use anyhow::Result; use gpui::{ action, elements::*, @@ -52,12 +53,10 @@ pub fn init(cx: &mut MutableAppContext) { open_paths(action, cx).detach() }); cx.add_global_action(open_new); - cx.add_global_action(join_worktree); cx.add_action(Workspace::save_active_item); cx.add_action(Workspace::debug_elements); cx.add_action(Workspace::open_new_file); cx.add_action(Workspace::share_worktree); - cx.add_action(Workspace::join_worktree); cx.add_action(Workspace::toggle_sidebar_item); cx.add_bindings(vec![ Binding::new("cmd-s", Save, None), @@ -129,14 +128,6 @@ fn open_new(action: &OpenNew, cx: &mut MutableAppContext) { }); } -fn join_worktree(action: &JoinWorktree, cx: &mut MutableAppContext) { - cx.add_window(window_options(), |cx| { - let mut view = Workspace::new(action.0.as_ref(), cx); - view.join_worktree(action, cx); - view - }); -} - fn window_options() -> WindowOptions<'static> { WindowOptions { bounds: RectF::new(vec2f(0., 0.), vec2f(1024., 768.)), @@ -818,67 +809,46 @@ impl Workspace { fn share_worktree(&mut self, _: &ShareWorktree, cx: &mut 
ViewContext) { let rpc = self.rpc.clone(); - let platform = cx.platform(); - - let task = cx.spawn(|this, mut cx| async move { - rpc.authenticate_and_connect(&cx).await?; - - let share_task = this.update(&mut cx, |this, cx| { - let worktree = this.worktrees.iter().next()?; - worktree.update(cx, |worktree, cx| { - let worktree = worktree.as_local_mut()?; - Some(worktree.share(cx)) - }) - }); + cx.spawn(|this, mut cx| { + async move { + rpc.authenticate_and_connect(&cx).await?; + + let share_task = this.update(&mut cx, |this, cx| { + let worktree = this.worktrees.iter().next()?; + worktree.update(cx, |worktree, cx| { + let worktree = worktree.as_local_mut()?; + Some(worktree.share(cx)) + }) + }); - if let Some(share_task) = share_task { - let (worktree_id, access_token) = share_task.await?; - let worktree_url = rpc::encode_worktree_url(worktree_id, &access_token); - log::info!("wrote worktree url to clipboard: {}", worktree_url); - platform.write_to_clipboard(ClipboardItem::new(worktree_url)); - } - surf::Result::Ok(()) - }); + if let Some(share_task) = share_task { + share_task.await?; + } - cx.spawn(|_, _| async move { - if let Err(e) = task.await { - log::error!("sharing failed: {:?}", e); + Ok(()) } + .log_err() }) .detach(); } - fn join_worktree(&mut self, _: &JoinWorktree, cx: &mut ViewContext) { + fn join_worktree(&mut self, id: u64, cx: &mut ViewContext) { let rpc = self.rpc.clone(); let languages = self.languages.clone(); - let task = cx.spawn(|this, mut cx| async move { - rpc.authenticate_and_connect(&cx).await?; - - let worktree_url = cx - .platform() - .read_from_clipboard() - .ok_or_else(|| anyhow!("failed to read url from clipboard"))?; - let (worktree_id, access_token) = rpc::decode_worktree_url(worktree_url.text()) - .ok_or_else(|| anyhow!("failed to decode worktree url"))?; - log::info!("read worktree url from clipboard: {}", worktree_url.text()); - - let worktree = - Worktree::open_remote(rpc.clone(), worktree_id, access_token, languages, &mut cx) - 
.await?; - this.update(&mut cx, |workspace, cx| { - cx.observe(&worktree, |_, _, cx| cx.notify()).detach(); - workspace.worktrees.insert(worktree); - cx.notify(); - }); - - surf::Result::Ok(()) - }); + cx.spawn(|this, mut cx| { + async move { + rpc.authenticate_and_connect(&cx).await?; + let worktree = Worktree::open_remote(rpc.clone(), id, languages, &mut cx).await?; + this.update(&mut cx, |workspace, cx| { + cx.observe(&worktree, |_, _, cx| cx.notify()).detach(); + workspace.worktrees.insert(worktree); + cx.notify(); + }); - cx.spawn(|_, _| async move { - if let Err(e) = task.await { - log::error!("joining failed: {}", e); + Ok(()) } + .log_err() }) .detach(); } diff --git a/zed/src/worktree.rs b/zed/src/worktree.rs index 79f76a97089e1..60d453b4083e9 100644 --- a/zed/src/worktree.rs +++ b/zed/src/worktree.rs @@ -7,7 +7,7 @@ use crate::{ fuzzy, fuzzy::CharBag, language::LanguageRegistry, - rpc::{self, proto}, + rpc::{self, proto, Status}, time::{self, ReplicaId}, util::{Bias, TryFutureExt}, }; @@ -27,6 +27,7 @@ use postage::{ prelude::{Sink as _, Stream as _}, watch, }; +use serde::Deserialize; use smol::channel::{self, Sender}; use std::{ cmp::{self, Ordering}, @@ -67,9 +68,9 @@ impl Entity for Worktree { fn release(&mut self, cx: &mut MutableAppContext) { let rpc = match self { Self::Local(tree) => tree - .share - .as_ref() - .map(|share| (tree.rpc.clone(), share.remote_id)), + .remote_id + .borrow() + .map(|remote_id| (tree.rpc.clone(), remote_id)), Self::Remote(tree) => Some((tree.rpc.clone(), tree.remote_id)), }; @@ -112,17 +113,10 @@ impl Worktree { pub async fn open_remote( rpc: Arc, id: u64, - access_token: String, languages: Arc, cx: &mut AsyncAppContext, ) -> Result> { - let response = rpc - .request(proto::JoinWorktree { - worktree_id: id, - access_token, - }) - .await?; - + let response = rpc.request(proto::JoinWorktree { worktree_id: id }).await?; Worktree::remote(response, rpc, languages, cx).await } @@ -136,7 +130,7 @@ impl Worktree { .worktree 
.ok_or_else(|| anyhow!("empty worktree"))?; - let remote_id = join_response.worktree_id; + let remote_id = worktree.id; let replica_id = join_response.replica_id as ReplicaId; let peers = join_response.peers; let root_char_bag: CharBag = worktree @@ -650,10 +644,13 @@ impl Deref for Worktree { pub struct LocalWorktree { snapshot: Snapshot, + config: WorktreeConfig, background_snapshot: Arc>, last_scan_state_rx: watch::Receiver, _background_scanner_task: Option>, + _maintain_remote_id_task: Task>, poll_task: Option>, + remote_id: watch::Receiver>, share: Option, open_buffers: HashMap>, shared_buffers: HashMap>>, @@ -664,6 +661,11 @@ pub struct LocalWorktree { fs: Arc, } +#[derive(Default, Deserialize)] +struct WorktreeConfig { + collaborators: Vec, +} + impl LocalWorktree { async fn new( rpc: Arc, @@ -684,6 +686,13 @@ impl LocalWorktree { let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect(); let metadata = fs.metadata(&abs_path).await?; + let mut config = WorktreeConfig::default(); + if let Ok(zed_toml) = fs.load(&abs_path.join(".zed.toml")).await { + if let Ok(parsed) = toml::from_str(&zed_toml) { + config = parsed; + } + } + let (scan_states_tx, scan_states_rx) = smol::channel::unbounded(); let (mut last_scan_state_tx, last_scan_state_rx) = watch::channel_with(ScanState::Scanning); let tree = cx.add_model(move |cx: &mut ModelContext| { @@ -691,7 +700,7 @@ impl LocalWorktree { id: cx.model_id(), scan_id: 0, abs_path, - root_name, + root_name: root_name.clone(), root_char_bag, ignores: Default::default(), entries_by_path: Default::default(), @@ -708,11 +717,48 @@ impl LocalWorktree { )); } + let (mut remote_id_tx, remote_id_rx) = watch::channel(); + let _maintain_remote_id_task = cx.spawn_weak({ + let rpc = rpc.clone(); + move |this, cx| { + async move { + let mut status = rpc.status(); + while let Some(status) = status.recv().await { + if let Some(this) = this.upgrade(&cx) { + let remote_id = if let Status::Connected { .. 
} = status { + let collaborator_logins = this.read_with(&cx, |this, _| { + this.as_local().unwrap().config.collaborators.clone() + }); + let response = rpc + .request(proto::OpenWorktree { + root_name: root_name.clone(), + collaborator_logins, + }) + .await?; + + Some(response.worktree_id) + } else { + None + }; + if remote_id_tx.send(remote_id).await.is_err() { + break; + } + } + } + Ok(()) + } + .log_err() + } + }); + let tree = Self { snapshot: snapshot.clone(), + config, + remote_id: remote_id_rx, background_snapshot: Arc::new(Mutex::new(snapshot)), last_scan_state_rx, _background_scanner_task: None, + _maintain_remote_id_task, share: None, poll_task: None, open_buffers: Default::default(), @@ -733,13 +779,10 @@ impl LocalWorktree { let tree = this.as_local_mut().unwrap(); if !tree.is_scanning() { if let Some(share) = tree.share.as_ref() { - Some((tree.snapshot(), share.snapshots_tx.clone())) - } else { - None + return Some((tree.snapshot(), share.snapshots_tx.clone())); } - } else { - None } + None }); if let Some((snapshot, snapshots_to_send_tx)) = to_send { @@ -894,6 +937,18 @@ impl LocalWorktree { } } + pub fn next_remote_id(&self) -> impl Future> { + let mut remote_id = self.remote_id.clone(); + async move { + while let Some(remote_id) = remote_id.recv().await { + if remote_id.is_some() { + return remote_id; + } + } + None + } + } + fn is_scanning(&self) -> bool { if let ScanState::Scanning = *self.last_scan_state_rx.borrow() { true @@ -979,17 +1034,19 @@ impl LocalWorktree { }) } - pub fn share( - &mut self, - cx: &mut ModelContext, - ) -> Task> { + pub fn share(&mut self, cx: &mut ModelContext) -> Task> { let snapshot = self.snapshot(); let share_request = self.share_request(cx); let rpc = self.rpc.clone(); cx.spawn(|this, mut cx| async move { - let share_request = share_request.await; + let share_request = if let Some(request) = share_request.await { + request + } else { + return Err(anyhow!("failed to open worktree on the server")); + }; + + let 
remote_id = share_request.worktree.as_ref().unwrap().id; let share_response = rpc.request(share_request).await?; - let remote_id = share_response.worktree_id; log::info!("sharing worktree {:?}", share_response); let (snapshots_to_send_tx, snapshots_to_send_rx) = @@ -1023,28 +1080,34 @@ impl LocalWorktree { let worktree = worktree.as_local_mut().unwrap(); worktree.share = Some(ShareState { - remote_id: share_response.worktree_id, snapshots_tx: snapshots_to_send_tx, _subscriptions, }); }); - Ok((remote_id, share_response.access_token)) + Ok(remote_id) }) } - fn share_request(&self, cx: &mut ModelContext) -> Task { + fn share_request(&self, cx: &mut ModelContext) -> Task> { + let remote_id = self.next_remote_id(); let snapshot = self.snapshot(); let root_name = self.root_name.clone(); cx.background().spawn(async move { - let entries = snapshot - .entries_by_path - .cursor::<(), ()>() - .map(Into::into) - .collect(); - proto::ShareWorktree { - worktree: Some(proto::Worktree { root_name, entries }), - } + remote_id.await.map(|id| { + let entries = snapshot + .entries_by_path + .cursor::<(), ()>() + .map(Into::into) + .collect(); + proto::ShareWorktree { + worktree: Some(proto::Worktree { + id, + root_name, + entries, + }), + } + }) }) } } @@ -1082,7 +1145,6 @@ impl fmt::Debug for LocalWorktree { } struct ShareState { - remote_id: u64, snapshots_tx: Sender, _subscriptions: Vec, } @@ -1552,9 +1614,9 @@ impl File { self.worktree.update(cx, |worktree, cx| { if let Some((rpc, remote_id)) = match worktree { Worktree::Local(worktree) => worktree - .share - .as_ref() - .map(|share| (worktree.rpc.clone(), share.remote_id)), + .remote_id + .borrow() + .map(|id| (worktree.rpc.clone(), id)), Worktree::Remote(worktree) => Some((worktree.rpc.clone(), worktree.remote_id)), } { cx.spawn(|worktree, mut cx| async move { @@ -1639,7 +1701,7 @@ impl File { self.worktree.update(cx, |worktree, cx| match worktree { Worktree::Local(worktree) => { let rpc = worktree.rpc.clone(); - let 
worktree_id = worktree.share.as_ref().map(|share| share.remote_id); + let worktree_id = *worktree.remote_id.borrow(); let save = worktree.save(self.path.clone(), text, cx); cx.background().spawn(async move { let entry = save.await?; @@ -2528,6 +2590,7 @@ impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry { #[cfg(test)] mod tests { use super::*; + use crate::fs::FakeFs; use crate::test::*; use anyhow::Result; use fs::RealFs; @@ -2778,10 +2841,10 @@ mod tests { .update(&mut cx, |tree, cx| { tree.as_local().unwrap().share_request(cx) }) - .await; + .await + .unwrap(); let remote = Worktree::remote( proto::JoinWorktreeResponse { - worktree_id, worktree: share_request.worktree, replica_id: 1, peers: Vec::new(), @@ -2925,6 +2988,65 @@ mod tests { }); } + #[gpui::test] + async fn test_open_and_share_worktree(mut cx: gpui::TestAppContext) { + let user_id = 100; + let mut client = rpc::Client::new(); + let server = FakeServer::for_client(user_id, &mut client, &cx).await; + + let fs = Arc::new(FakeFs::new()); + fs.insert_tree( + "/path", + json!({ + "to": { + "the-dir": { + ".zed.toml": r#"collaborators = ["friend-1", "friend-2"]"#, + "a.txt": "a-contents", + }, + }, + }), + ) + .await; + + let worktree = Worktree::open_local( + client.clone(), + "/path/to/the-dir".as_ref(), + fs, + Default::default(), + &mut cx.to_async(), + ) + .await + .unwrap(); + + { + let cx = cx.to_async(); + client.authenticate_and_connect(&cx).await.unwrap(); + } + + let open_worktree = server.receive::().await.unwrap(); + assert_eq!( + open_worktree.payload, + proto::OpenWorktree { + root_name: "the-dir".to_string(), + collaborator_logins: vec!["friend-1".to_string(), "friend-2".to_string()], + } + ); + + server + .respond( + open_worktree.receipt(), + proto::OpenWorktreeResponse { worktree_id: 5 }, + ) + .await; + let remote_id = worktree + .update(&mut cx, |tree, _| tree.as_local().unwrap().next_remote_id()) + .await; + assert_eq!(remote_id, Some(5)); + + cx.update(move |_| drop(worktree)); 
+ server.receive::().await.unwrap(); + } + #[gpui::test(iterations = 100)] fn test_random(mut rng: StdRng) { let operations = env::var("OPERATIONS") diff --git a/zrpc/proto/zed.proto b/zrpc/proto/zed.proto index e6b400bef98ba..b5fe1604aee48 100644 --- a/zrpc/proto/zed.proto +++ b/zrpc/proto/zed.proto @@ -35,6 +35,9 @@ message Envelope { ChannelMessageSent channel_message_sent = 30; GetChannelMessages get_channel_messages = 31; GetChannelMessagesResponse get_channel_messages_response = 32; + OpenWorktree open_worktree = 33; + OpenWorktreeResponse open_worktree_response = 34; + UnshareWorktree unshare_worktree = 35; } } @@ -48,22 +51,30 @@ message Error { string message = 1; } +message OpenWorktree { + string root_name = 1; + repeated string collaborator_logins = 2; +} + +message OpenWorktreeResponse { + uint64 worktree_id = 1; +} + message ShareWorktree { Worktree worktree = 1; } -message ShareWorktreeResponse { +message ShareWorktreeResponse {} + +message UnshareWorktree { uint64 worktree_id = 1; - string access_token = 2; } message JoinWorktree { uint64 worktree_id = 1; - string access_token = 2; } message JoinWorktreeResponse { - uint64 worktree_id = 1; Worktree worktree = 2; uint32 replica_id = 3; repeated Peer peers = 4; @@ -187,8 +198,9 @@ message User { } message Worktree { - string root_name = 1; - repeated Entry entries = 2; + uint64 id = 1; + string root_name = 2; + repeated Entry entries = 3; } message Entry { diff --git a/zrpc/src/proto.rs b/zrpc/src/proto.rs index b00bca89c2404..09348a0fc5d2a 100644 --- a/zrpc/src/proto.rs +++ b/zrpc/src/proto.rs @@ -135,11 +135,13 @@ messages!( GetUsersResponse, JoinChannel, JoinChannelResponse, + JoinWorktree, + JoinWorktreeResponse, LeaveChannel, OpenBuffer, OpenBufferResponse, - JoinWorktree, - JoinWorktreeResponse, + OpenWorktree, + OpenWorktreeResponse, Ping, RemovePeer, SaveBuffer, @@ -147,6 +149,7 @@ messages!( SendChannelMessageResponse, ShareWorktree, ShareWorktreeResponse, + UnshareWorktree, UpdateBuffer, 
UpdateWorktree, ); @@ -157,10 +160,12 @@ request_messages!( (JoinChannel, JoinChannelResponse), (OpenBuffer, OpenBufferResponse), (JoinWorktree, JoinWorktreeResponse), + (OpenWorktree, OpenWorktreeResponse), (Ping, Ack), (SaveBuffer, BufferSaved), (UpdateBuffer, Ack), (ShareWorktree, ShareWorktreeResponse), + (UnshareWorktree, Ack), (SendChannelMessage, SendChannelMessageResponse), (GetChannelMessages, GetChannelMessagesResponse), ); @@ -175,6 +180,7 @@ entity_messages!( JoinWorktree, RemovePeer, SaveBuffer, + UnshareWorktree, UpdateBuffer, UpdateWorktree, ); From 12fa309b7c3395c3bc0dfa29a139aff4efaeda59 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 17 Sep 2021 14:36:50 +0200 Subject: [PATCH 04/43] Implement `proto::GetCollaborators` request --- server/src/bin/seed.rs | 8 +- server/src/db.rs | 17 +---- server/src/rpc.rs | 161 ++++++++++++++++++++++++++++++++--------- zrpc/proto/zed.proto | 19 +++++ zrpc/src/proto.rs | 3 + 5 files changed, 154 insertions(+), 54 deletions(-) diff --git a/server/src/bin/seed.rs b/server/src/bin/seed.rs index d2427d495c451..4d3fb978dbd7d 100644 --- a/server/src/bin/seed.rs +++ b/server/src/bin/seed.rs @@ -27,8 +27,12 @@ async fn main() { let zed_users = ["nathansobo", "maxbrunsfeld", "as-cii", "iamnbutler"]; let mut zed_user_ids = Vec::::new(); for zed_user in zed_users { - if let Some(user_id) = db.get_user(zed_user).await.expect("failed to fetch user") { - zed_user_ids.push(user_id); + if let Some(user) = db + .get_user_by_github_login(zed_user) + .await + .expect("failed to fetch user") + { + zed_user_ids.push(user.id); } else { zed_user_ids.push( db.create_user(zed_user, true) diff --git a/server/src/db.rs b/server/src/db.rs index 14ad85b68af2e..a826220b11b36 100644 --- a/server/src/db.rs +++ b/server/src/db.rs @@ -84,27 +84,12 @@ impl Db { // users - #[allow(unused)] // Help rust-analyzer - #[cfg(any(test, feature = "seed-support"))] - pub async fn get_user(&self, github_login: &str) -> Result> { - 
test_support!(self, { - let query = " - SELECT id - FROM users - WHERE github_login = $1 - "; - sqlx::query_scalar(query) - .bind(github_login) - .fetch_optional(&self.pool) - .await - }) - } - pub async fn create_user(&self, github_login: &str, admin: bool) -> Result { test_support!(self, { let query = " INSERT INTO users (github_login, admin) VALUES ($1, $2) + ON CONFLICT (github_login) DO UPDATE SET github_login = excluded.github_login RETURNING id "; sqlx::query_scalar(query) diff --git a/server/src/rpc.rs b/server/src/rpc.rs index bbc0b090ca6af..950b6749dc8a7 100644 --- a/server/src/rpc.rs +++ b/server/src/rpc.rs @@ -48,8 +48,9 @@ pub struct Server { #[derive(Default)] struct ServerState { connections: HashMap, + connections_by_user_id: HashMap>, pub worktrees: HashMap, - visible_worktrees_by_github_login: HashMap>, + visible_worktrees_by_user_id: HashMap>, channels: HashMap, next_worktree_id: u64, } @@ -62,7 +63,7 @@ struct ConnectionState { struct Worktree { host_connection_id: ConnectionId, - collaborator_github_logins: Vec, + collaborator_user_ids: Vec, root_name: String, share: Option, } @@ -113,7 +114,8 @@ impl Server { .add_handler(Server::join_channel) .add_handler(Server::leave_channel) .add_handler(Server::send_channel_message) - .add_handler(Server::get_channel_messages); + .add_handler(Server::get_channel_messages) + .add_handler(Server::get_collaborators); Arc::new(server) } @@ -215,7 +217,8 @@ impl Server { // Add a new connection associated with a given user. async fn add_connection(&self, connection_id: ConnectionId, user_id: UserId) { - self.state.write().await.connections.insert( + let mut state = self.state.write().await; + state.connections.insert( connection_id, ConnectionState { user_id, @@ -223,6 +226,11 @@ impl Server { channels: Default::default(), }, ); + state + .connections_by_user_id + .entry(user_id) + .or_default() + .insert(connection_id); } // Remove the given connection and its association with any worktrees. 
@@ -249,6 +257,15 @@ impl Server { } } } + + let user_connections = state + .connections_by_user_id + .get_mut(&connection.user_id) + .unwrap(); + user_connections.remove(&connection_id); + if user_connections.is_empty() { + state.connections_by_user_id.remove(&connection.user_id); + } } worktree_ids } @@ -264,10 +281,24 @@ impl Server { ) -> tide::Result<()> { let receipt = request.receipt(); + let mut collaborator_user_ids = Vec::new(); + for github_login in request.payload.collaborator_logins { + match self.app_state.db.create_user(&github_login, false).await { + Ok(user_id) => collaborator_user_ids.push(user_id), + Err(err) => { + let message = err.to_string(); + self.peer + .respond_with_error(receipt, proto::Error { message }) + .await?; + return Ok(()); + } + } + } + let mut state = self.state.write().await; let worktree_id = state.add_worktree(Worktree { host_connection_id: request.sender_id, - collaborator_github_logins: request.payload.collaborator_logins, + collaborator_user_ids, root_name: request.payload.root_name, share: None, }); @@ -351,12 +382,16 @@ impl Server { request: TypedEnvelope, ) -> tide::Result<()> { let worktree_id = request.payload.worktree_id; - let user = self.user_for_connection(request.sender_id).await?; + let user_id = self + .state + .read() + .await + .user_id_for_connection(request.sender_id)?; let response; let connection_ids; let mut state = self.state.write().await; - match state.join_worktree(request.sender_id, &user, worktree_id) { + match state.join_worktree(request.sender_id, user_id, worktree_id) { Ok((peer_replica_id, worktree)) => { let share = worktree.share()?; let peer_count = share.guest_connection_ids.len(); @@ -639,6 +674,66 @@ impl Server { Ok(()) } + async fn get_collaborators( + self: Arc, + request: TypedEnvelope, + ) -> tide::Result<()> { + let mut collaborators = HashMap::new(); + { + let state = self.state.read().await; + let user_id = state.user_id_for_connection(request.sender_id)?; + for worktree_id in 
state + .visible_worktrees_by_user_id + .get(&user_id) + .unwrap_or(&HashSet::new()) + { + let worktree = &state.worktrees[worktree_id]; + + let mut participants = Vec::new(); + for collaborator_user_id in &worktree.collaborator_user_ids { + collaborators + .entry(*collaborator_user_id) + .or_insert_with(|| proto::Collaborator { + user_id: collaborator_user_id.to_proto(), + worktrees: Vec::new(), + is_online: state.is_online(*collaborator_user_id), + }); + + if let Ok(share) = worktree.share() { + let mut conn_ids = state.user_connection_ids(*collaborator_user_id); + if conn_ids.any(|c| share.guest_connection_ids.contains_key(&c)) { + participants.push(collaborator_user_id.to_proto()); + } + } + } + + let host_user_id = state.user_id_for_connection(worktree.host_connection_id)?; + let host = + collaborators + .entry(host_user_id) + .or_insert_with(|| proto::Collaborator { + user_id: host_user_id.to_proto(), + worktrees: Vec::new(), + is_online: true, + }); + host.worktrees.push(proto::CollaboratorWorktree { + is_shared: worktree.share().is_ok(), + participants, + }); + } + } + + self.peer + .respond( + request.receipt(), + proto::GetCollaboratorsResponse { + collaborators: collaborators.into_values().collect(), + }, + ) + .await?; + Ok(()) + } + async fn join_channel( self: Arc, request: TypedEnvelope, @@ -856,24 +951,6 @@ impl Server { Ok(()) } - async fn user_for_connection(&self, connection_id: ConnectionId) -> tide::Result { - let user_id = self - .state - .read() - .await - .connections - .get(&connection_id) - .ok_or_else(|| anyhow!("no such connection"))? - .user_id; - Ok(self - .app_state - .db - .get_users_by_ids(user_id, Some(user_id).into_iter()) - .await? - .pop() - .ok_or_else(|| anyhow!("no such user"))?) 
- } - async fn broadcast_in_worktree( &self, worktree_id: u64, @@ -945,11 +1022,26 @@ impl ServerState { .user_id) } + fn user_connection_ids<'a>( + &'a self, + user_id: UserId, + ) -> impl 'a + Iterator { + self.connections_by_user_id + .get(&user_id) + .into_iter() + .flatten() + .copied() + } + + fn is_online(&self, user_id: UserId) -> bool { + self.connections_by_user_id.contains_key(&user_id) + } + // Add the given connection as a guest of the given worktree fn join_worktree( &mut self, connection_id: ConnectionId, - user: &User, + user_id: UserId, worktree_id: u64, ) -> tide::Result<(ReplicaId, &Worktree)> { let connection = self @@ -960,10 +1052,7 @@ impl ServerState { .worktrees .get_mut(&worktree_id) .ok_or_else(|| anyhow!("no such worktree"))?; - if !worktree - .collaborator_github_logins - .contains(&user.github_login) - { + if !worktree.collaborator_user_ids.contains(&user_id) { Err(anyhow!("no such worktree"))?; } @@ -1032,9 +1121,9 @@ impl ServerState { fn add_worktree(&mut self, worktree: Worktree) -> u64 { let worktree_id = self.next_worktree_id; - for collaborator_login in &worktree.collaborator_github_logins { - self.visible_worktrees_by_github_login - .entry(collaborator_login.clone()) + for collaborator_user_id in &worktree.collaborator_user_ids { + self.visible_worktrees_by_user_id + .entry(*collaborator_user_id) .or_default() .insert(worktree_id); } @@ -1055,10 +1144,10 @@ impl ServerState { } } } - for collaborator_login in worktree.collaborator_github_logins { + for collaborator_user_id in worktree.collaborator_user_ids { if let Some(visible_worktrees) = self - .visible_worktrees_by_github_login - .get_mut(&collaborator_login) + .visible_worktrees_by_user_id + .get_mut(&collaborator_user_id) { visible_worktrees.remove(&worktree_id); } diff --git a/zrpc/proto/zed.proto b/zrpc/proto/zed.proto index b5fe1604aee48..30a282f6b59a1 100644 --- a/zrpc/proto/zed.proto +++ b/zrpc/proto/zed.proto @@ -38,6 +38,8 @@ message Envelope { OpenWorktree 
open_worktree = 33; OpenWorktreeResponse open_worktree_response = 34; UnshareWorktree unshare_worktree = 35; + GetCollaborators get_collaborators = 36; + GetCollaboratorsResponse get_collaborators_response = 37; } } @@ -184,6 +186,12 @@ message GetChannelMessagesResponse { bool done = 2; } +message GetCollaborators {} + +message GetCollaboratorsResponse { + repeated Collaborator collaborators = 1; +} + // Entities message Peer { @@ -326,3 +334,14 @@ message ChannelMessage { uint64 sender_id = 4; Nonce nonce = 5; } + +message Collaborator { + uint64 user_id = 1; + repeated CollaboratorWorktree worktrees = 2; + bool is_online = 3; +} + +message CollaboratorWorktree { + bool is_shared = 1; + repeated uint64 participants = 2; +} diff --git a/zrpc/src/proto.rs b/zrpc/src/proto.rs index 09348a0fc5d2a..282710c64c734 100644 --- a/zrpc/src/proto.rs +++ b/zrpc/src/proto.rs @@ -131,6 +131,8 @@ messages!( GetChannelMessagesResponse, GetChannels, GetChannelsResponse, + GetCollaborators, + GetCollaboratorsResponse, GetUsers, GetUsersResponse, JoinChannel, @@ -168,6 +170,7 @@ request_messages!( (UnshareWorktree, Ack), (SendChannelMessage, SendChannelMessageResponse), (GetChannelMessages, GetChannelMessagesResponse), + (GetCollaborators, GetCollaboratorsResponse), ); entity_messages!( From f2825ccebacd77d71cc63dc642ad99740774d1ab Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 17 Sep 2021 16:14:44 +0200 Subject: [PATCH 05/43] Start on `Presence` as the backing model for the people panel --- server/src/rpc.rs | 14 ++++- zed/src/lib.rs | 4 ++ zed/src/main.rs | 5 +- zed/src/people_panel.rs | 31 ++++++++++ zed/src/presence.rs | 129 ++++++++++++++++++++++++++++++++++++++++ zed/src/test.rs | 2 + zed/src/workspace.rs | 7 ++- zrpc/proto/zed.proto | 5 +- 8 files changed, 191 insertions(+), 6 deletions(-) create mode 100644 zed/src/people_panel.rs create mode 100644 zed/src/presence.rs diff --git a/server/src/rpc.rs b/server/src/rpc.rs index 950b6749dc8a7..1b65ed6d67ded 100644 
--- a/server/src/rpc.rs +++ b/server/src/rpc.rs @@ -1,6 +1,6 @@ use super::{ auth, - db::{ChannelId, MessageId, User, UserId}, + db::{ChannelId, MessageId, UserId}, AppState, }; use anyhow::anyhow; @@ -280,11 +280,20 @@ impl Server { request: TypedEnvelope, ) -> tide::Result<()> { let receipt = request.receipt(); + let user_id = self + .state + .read() + .await + .user_id_for_connection(request.sender_id)?; let mut collaborator_user_ids = Vec::new(); for github_login in request.payload.collaborator_logins { match self.app_state.db.create_user(&github_login, false).await { - Ok(user_id) => collaborator_user_ids.push(user_id), + Ok(collaborator_user_id) => { + if collaborator_user_id != user_id { + collaborator_user_ids.push(collaborator_user_id); + } + } Err(err) => { let message = err.to_string(); self.peer @@ -717,6 +726,7 @@ impl Server { is_online: true, }); host.worktrees.push(proto::CollaboratorWorktree { + root_name: worktree.root_name.clone(), is_shared: worktree.share().is_ok(), participants, }); diff --git a/zed/src/lib.rs b/zed/src/lib.rs index c9cec56f46da0..e07d04a5a4af7 100644 --- a/zed/src/lib.rs +++ b/zed/src/lib.rs @@ -8,6 +8,8 @@ mod fuzzy; pub mod http; pub mod language; pub mod menus; +pub mod people_panel; +pub mod presence; pub mod project_browser; pub mod rpc; pub mod settings; @@ -26,6 +28,7 @@ use channel::ChannelList; use gpui::{action, keymap::Binding, ModelHandle}; use parking_lot::Mutex; use postage::watch; +use presence::Presence; use std::sync::Arc; pub use settings::Settings; @@ -46,6 +49,7 @@ pub struct AppState { pub user_store: Arc, pub fs: Arc, pub channel_list: ModelHandle, + pub presence: ModelHandle, } pub fn init(app_state: &Arc, cx: &mut gpui::MutableAppContext) { diff --git a/zed/src/main.rs b/zed/src/main.rs index 87426c1ca7bac..45aeb52dbcf66 100644 --- a/zed/src/main.rs +++ b/zed/src/main.rs @@ -13,7 +13,9 @@ use zed::{ channel::ChannelList, chat_panel, editor, file_finder, fs::RealFs, - http, language, menus, rpc, 
settings, theme_selector, + http, language, menus, + presence::Presence, + rpc, settings, theme_selector, user::UserStore, workspace::{self, OpenNew, OpenParams, OpenPaths}, AppState, @@ -45,6 +47,7 @@ fn main() { settings, themes, channel_list: cx.add_model(|cx| ChannelList::new(user_store.clone(), rpc.clone(), cx)), + presence: cx.add_model(|cx| Presence::new(user_store.clone(), rpc.clone(), cx)), rpc, user_store, fs: Arc::new(RealFs), diff --git a/zed/src/people_panel.rs b/zed/src/people_panel.rs new file mode 100644 index 0000000000000..7e6d67548966e --- /dev/null +++ b/zed/src/people_panel.rs @@ -0,0 +1,31 @@ +use crate::presence::Presence; +use gpui::{ + elements::Empty, Element, ElementBox, Entity, ModelHandle, RenderContext, View, ViewContext, +}; + +pub struct PeoplePanel { + presence: ModelHandle, +} + +impl PeoplePanel { + pub fn new(presence: ModelHandle, cx: &mut ViewContext) -> Self { + cx.observe(&presence, |_, _, cx| cx.notify()); + Self { presence } + } +} + +pub enum Event {} + +impl Entity for PeoplePanel { + type Event = Event; +} + +impl View for PeoplePanel { + fn ui_name() -> &'static str { + "PeoplePanel" + } + + fn render(&mut self, _: &mut RenderContext) -> ElementBox { + Empty::new().boxed() + } +} diff --git a/zed/src/presence.rs b/zed/src/presence.rs new file mode 100644 index 0000000000000..5196a6971cd46 --- /dev/null +++ b/zed/src/presence.rs @@ -0,0 +1,129 @@ +use crate::{ + rpc::Client, + user::{User, UserStore}, + util::TryFutureExt, +}; +use anyhow::Result; +use gpui::{Entity, ModelContext, Task}; +use postage::prelude::Stream; +use smol::future::FutureExt; +use std::{collections::HashSet, sync::Arc, time::Duration}; +use zrpc::proto; + +pub struct Presence { + collaborators: Vec, + user_store: Arc, + rpc: Arc, + _maintain_people: Task<()>, +} + +#[derive(Debug)] +struct Collaborator { + user: Arc, + worktrees: Vec, +} + +#[derive(Debug)] +struct CollaboratorWorktree { + root_name: String, + is_shared: bool, + participants: Vec>, 
+} + +impl Presence { + pub fn new(user_store: Arc, rpc: Arc, cx: &mut ModelContext) -> Self { + let _maintain_collaborators = cx.spawn_weak(|this, mut cx| { + let user_store = user_store.clone(); + let foreground = cx.foreground(); + async move { + let mut current_user = user_store.watch_current_user(); + loop { + let timer = foreground.timer(Duration::from_secs(2)); + let next_current_user = async { + current_user.recv().await; + }; + + next_current_user.race(timer).await; + if current_user.borrow().is_some() { + if let Some(this) = cx.read(|cx| this.upgrade(cx)) { + this.update(&mut cx, |this, cx| this.refresh(cx)) + .log_err() + .await; + } + } + } + } + }); + + Self { + collaborators: Vec::new(), + user_store, + rpc, + _maintain_people: _maintain_collaborators, + } + } + + fn refresh(&self, cx: &mut ModelContext) -> Task> { + cx.spawn(|this, mut cx| { + let rpc = self.rpc.clone(); + let user_store = self.user_store.clone(); + async move { + let response = rpc.request(proto::GetCollaborators {}).await?; + let mut user_ids = HashSet::new(); + for collaborator in &response.collaborators { + user_ids.insert(collaborator.user_id); + user_ids.extend( + collaborator + .worktrees + .iter() + .flat_map(|w| &w.participants) + .copied(), + ); + } + user_store + .load_users(user_ids.into_iter().collect()) + .await?; + + let mut collaborators = Vec::new(); + for collaborator in response.collaborators { + collaborators.push(Collaborator::from_proto(collaborator, &user_store).await?); + } + + this.update(&mut cx, |this, cx| { + this.collaborators = collaborators; + cx.notify(); + }); + + Ok(()) + } + }) + } +} + +pub enum Event {} + +impl Entity for Presence { + type Event = Event; +} + +impl Collaborator { + async fn from_proto( + collaborator: proto::Collaborator, + user_store: &Arc, + ) -> Result { + let user = user_store.fetch_user(collaborator.user_id).await?; + let mut worktrees = Vec::new(); + for worktree in collaborator.worktrees { + let mut participants = 
Vec::new(); + for participant_id in worktree.participants { + participants.push(user_store.fetch_user(participant_id).await?); + } + worktrees.push(CollaboratorWorktree { + root_name: worktree.root_name, + is_shared: worktree.is_shared, + participants, + }); + } + Ok(Self { user, worktrees }) + } +} diff --git a/zed/src/test.rs b/zed/src/test.rs index 7d027a8a1771b..77b284b24580a 100644 --- a/zed/src/test.rs +++ b/zed/src/test.rs @@ -4,6 +4,7 @@ use crate::{ fs::RealFs, http::{HttpClient, Request, Response, ServerResponse}, language::LanguageRegistry, + presence::Presence, rpc::{self, Client, Credentials, EstablishConnectionError}, settings::{self, ThemeRegistry}, time::ReplicaId, @@ -175,6 +176,7 @@ pub fn test_app_state(cx: &mut MutableAppContext) -> Arc { themes, languages: languages.clone(), channel_list: cx.add_model(|cx| ChannelList::new(user_store.clone(), rpc.clone(), cx)), + presence: cx.add_model(|cx| Presence::new(user_store.clone(), rpc.clone(), cx)), rpc, user_store, fs: Arc::new(RealFs), diff --git a/zed/src/workspace.rs b/zed/src/workspace.rs index bc918bb30fd5c..bcdbe17a545c4 100644 --- a/zed/src/workspace.rs +++ b/zed/src/workspace.rs @@ -7,6 +7,7 @@ use crate::{ editor::Buffer, fs::Fs, language::LanguageRegistry, + people_panel::PeoplePanel, project_browser::ProjectBrowser, rpc, settings::Settings, @@ -378,7 +379,11 @@ impl Workspace { }) .into(), ); - right_sidebar.add_item("icons/user-16.svg", cx.add_view(|_| ProjectBrowser).into()); + right_sidebar.add_item( + "icons/user-16.svg", + cx.add_view(|cx| PeoplePanel::new(app_state.presence.clone(), cx)) + .into(), + ); let mut current_user = app_state.user_store.watch_current_user().clone(); let mut connection_status = app_state.rpc.status().clone(); diff --git a/zrpc/proto/zed.proto b/zrpc/proto/zed.proto index 30a282f6b59a1..c1fd85ea277fc 100644 --- a/zrpc/proto/zed.proto +++ b/zrpc/proto/zed.proto @@ -342,6 +342,7 @@ message Collaborator { } message CollaboratorWorktree { - bool is_shared = 1; - 
repeated uint64 participants = 2; + string root_name = 1; + bool is_shared = 2; + repeated uint64 participants = 3; } From f13af7dc59fc44bfb8d7890a54b5143dd5d02d4f Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 17 Sep 2021 18:37:00 +0200 Subject: [PATCH 06/43] WIP Co-Authored-By: Nathan Sobo --- server/src/rpc.rs | 83 ++++++++++++++++++++++++-------------------- zed/src/presence.rs | 6 ++-- zrpc/proto/zed.proto | 12 +++---- zrpc/src/proto.rs | 4 +-- 4 files changed, 54 insertions(+), 51 deletions(-) diff --git a/server/src/rpc.rs b/server/src/rpc.rs index 1b65ed6d67ded..75e022e94bdff 100644 --- a/server/src/rpc.rs +++ b/server/src/rpc.rs @@ -114,8 +114,7 @@ impl Server { .add_handler(Server::join_channel) .add_handler(Server::leave_channel) .add_handler(Server::send_channel_message) - .add_handler(Server::get_channel_messages) - .add_handler(Server::get_collaborators); + .add_handler(Server::get_channel_messages); Arc::new(server) } @@ -307,7 +306,7 @@ impl Server { let mut state = self.state.write().await; let worktree_id = state.add_worktree(Worktree { host_connection_id: request.sender_id, - collaborator_user_ids, + collaborator_user_ids: collaborator_user_ids.clone(), root_name: request.payload.root_name, share: None, }); @@ -315,6 +314,8 @@ impl Server { self.peer .respond(receipt, proto::OpenWorktreeResponse { worktree_id }) .await?; + self.update_collaborators(&collaborator_user_ids).await?; + Ok(()) } @@ -341,6 +342,10 @@ impl Server { self.peer .respond(request.receipt(), proto::ShareWorktreeResponse {}) .await?; + + let collaborator_user_ids = worktree.collaborator_user_ids.clone(); + drop(state); + self.update_collaborators(&collaborator_user_ids).await?; } else { self.peer .respond_with_error( @@ -361,6 +366,7 @@ impl Server { let worktree_id = request.payload.worktree_id; let connection_ids; + let collaborator_user_ids; { let mut state = self.state.write().await; let worktree = state.write_worktree(worktree_id, request.sender_id)?; @@ 
-369,6 +375,7 @@ impl Server { } connection_ids = worktree.connection_ids(); + collaborator_user_ids = worktree.collaborator_user_ids.clone(); worktree.share.take(); for connection_id in &connection_ids { if let Some(connection) = state.connections.get_mut(connection_id) { @@ -382,6 +389,7 @@ impl Server { .send(conn_id, proto::UnshareWorktree { worktree_id }) }) .await?; + self.update_collaborators(&collaborator_user_ids).await?; Ok(()) } @@ -399,6 +407,7 @@ impl Server { let response; let connection_ids; + let collaborator_user_ids; let mut state = self.state.write().await; match state.join_worktree(request.sender_id, user_id, worktree_id) { Ok((peer_replica_id, worktree)) => { @@ -418,6 +427,7 @@ impl Server { } } connection_ids = worktree.connection_ids(); + collaborator_user_ids = worktree.collaborator_user_ids.clone(); response = proto::JoinWorktreeResponse { worktree: Some(proto::Worktree { id: worktree_id, @@ -455,6 +465,7 @@ impl Server { }) .await?; self.peer.respond(request.receipt(), response).await?; + self.update_collaborators(&collaborator_user_ids).await?; Ok(()) } @@ -683,14 +694,12 @@ impl Server { Ok(()) } - async fn get_collaborators( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result<()> { - let mut collaborators = HashMap::new(); - { - let state = self.state.read().await; - let user_id = state.user_id_for_connection(request.sender_id)?; + async fn update_collaborators(self: &Arc, user_ids: &[UserId]) -> tide::Result<()> { + let mut send_futures = Vec::new(); + + let state = self.state.read().await; + for user_id in user_ids { + let mut collaborators = HashMap::new(); for worktree_id in state .visible_worktrees_by_user_id .get(&user_id) @@ -698,21 +707,11 @@ impl Server { { let worktree = &state.worktrees[worktree_id]; - let mut participants = Vec::new(); - for collaborator_user_id in &worktree.collaborator_user_ids { - collaborators - .entry(*collaborator_user_id) - .or_insert_with(|| proto::Collaborator { - user_id: 
collaborator_user_id.to_proto(), - worktrees: Vec::new(), - is_online: state.is_online(*collaborator_user_id), - }); - - if let Ok(share) = worktree.share() { - let mut conn_ids = state.user_connection_ids(*collaborator_user_id); - if conn_ids.any(|c| share.guest_connection_ids.contains_key(&c)) { - participants.push(collaborator_user_id.to_proto()); - } + let mut participants = HashSet::new(); + if let Ok(share) = worktree.share() { + for guest_connection_id in share.guest_connection_ids.keys() { + let user_id = state.user_id_for_connection(*guest_connection_id)?; + participants.insert(user_id.to_proto()); } } @@ -723,24 +722,34 @@ impl Server { .or_insert_with(|| proto::Collaborator { user_id: host_user_id.to_proto(), worktrees: Vec::new(), - is_online: true, }); - host.worktrees.push(proto::CollaboratorWorktree { + host.worktrees.push(proto::WorktreeMetadata { root_name: worktree.root_name.clone(), is_shared: worktree.share().is_ok(), - participants, + participants: participants.into_iter().collect(), }); } + + let connection_ids = self + .state + .read() + .await + .user_connection_ids(*user_id) + .collect::>(); + + let collaborators = collaborators.into_values().collect::>(); + for connection_id in connection_ids { + send_futures.push(self.peer.send( + connection_id, + proto::UpdateCollaborators { + collaborators: collaborators.clone(), + }, + )); + } } - self.peer - .respond( - request.receipt(), - proto::GetCollaboratorsResponse { - collaborators: collaborators.into_values().collect(), - }, - ) - .await?; + futures::future::try_join_all(send_futures).await?; + Ok(()) } diff --git a/zed/src/presence.rs b/zed/src/presence.rs index 5196a6971cd46..0dc6a2a3ed2d4 100644 --- a/zed/src/presence.rs +++ b/zed/src/presence.rs @@ -20,11 +20,11 @@ pub struct Presence { #[derive(Debug)] struct Collaborator { user: Arc, - worktrees: Vec, + worktrees: Vec, } #[derive(Debug)] -struct CollaboratorWorktree { +struct WorktreeMetadata { root_name: String, is_shared: bool, 
participants: Vec>, @@ -118,7 +118,7 @@ impl Collaborator { for participant_id in worktree.participants { participants.push(user_store.fetch_user(participant_id).await?); } - worktrees.push(CollaboratorWorktree { + worktrees.push(WorktreeMetadata { root_name: worktree.root_name, is_shared: worktree.is_shared, participants, diff --git a/zrpc/proto/zed.proto b/zrpc/proto/zed.proto index c1fd85ea277fc..074bbe60938c8 100644 --- a/zrpc/proto/zed.proto +++ b/zrpc/proto/zed.proto @@ -38,8 +38,7 @@ message Envelope { OpenWorktree open_worktree = 33; OpenWorktreeResponse open_worktree_response = 34; UnshareWorktree unshare_worktree = 35; - GetCollaborators get_collaborators = 36; - GetCollaboratorsResponse get_collaborators_response = 37; + UpdateCollaborators update_collaborators = 36; } } @@ -186,9 +185,7 @@ message GetChannelMessagesResponse { bool done = 2; } -message GetCollaborators {} - -message GetCollaboratorsResponse { +message UpdateCollaborators { repeated Collaborator collaborators = 1; } @@ -337,11 +334,10 @@ message ChannelMessage { message Collaborator { uint64 user_id = 1; - repeated CollaboratorWorktree worktrees = 2; - bool is_online = 3; + repeated WorktreeMetadata worktrees = 2; } -message CollaboratorWorktree { +message WorktreeMetadata { string root_name = 1; bool is_shared = 2; repeated uint64 participants = 3; diff --git a/zrpc/src/proto.rs b/zrpc/src/proto.rs index 282710c64c734..f094923af387a 100644 --- a/zrpc/src/proto.rs +++ b/zrpc/src/proto.rs @@ -131,8 +131,7 @@ messages!( GetChannelMessagesResponse, GetChannels, GetChannelsResponse, - GetCollaborators, - GetCollaboratorsResponse, + UpdateCollaborators, GetUsers, GetUsersResponse, JoinChannel, @@ -170,7 +169,6 @@ request_messages!( (UnshareWorktree, Ack), (SendChannelMessage, SendChannelMessageResponse), (GetChannelMessages, GetChannelMessagesResponse), - (GetCollaborators, GetCollaboratorsResponse), ); entity_messages!( From d8ea220acc4bc9875167168f9c2956983c5a1d57 Mon Sep 17 00:00:00 2001 
From: Antonio Scandurra Date: Mon, 20 Sep 2021 12:29:32 +0200 Subject: [PATCH 07/43] Update collaborators as worktrees are opened/shared/closed --- server/src/rpc.rs | 163 +++++++++++++++++++++++++--------------------- 1 file changed, 87 insertions(+), 76 deletions(-) diff --git a/server/src/rpc.rs b/server/src/rpc.rs index 75e022e94bdff..86b9b96caee82 100644 --- a/server/src/rpc.rs +++ b/server/src/rpc.rs @@ -99,7 +99,7 @@ impl Server { server .add_handler(Server::ping) .add_handler(Server::open_worktree) - .add_handler(Server::close_worktree) + .add_handler(Server::handle_close_worktree) .add_handler(Server::share_worktree) .add_handler(Server::unshare_worktree) .add_handler(Server::join_worktree) @@ -195,22 +195,7 @@ impl Server { async fn sign_out(self: &Arc, connection_id: zrpc::ConnectionId) -> tide::Result<()> { self.peer.disconnect(connection_id).await; - let worktree_ids = self.remove_connection(connection_id).await; - for worktree_id in worktree_ids { - let state = self.state.read().await; - if let Some(worktree) = state.worktrees.get(&worktree_id) { - broadcast(connection_id, worktree.connection_ids(), |conn_id| { - self.peer.send( - conn_id, - proto::RemovePeer { - worktree_id, - peer_id: connection_id.0, - }, - ) - }) - .await?; - } - } + self.remove_connection(connection_id).await?; Ok(()) } @@ -233,29 +218,20 @@ impl Server { } // Remove the given connection and its association with any worktrees. 
- async fn remove_connection(&self, connection_id: ConnectionId) -> Vec { + async fn remove_connection( + self: &Arc, + connection_id: ConnectionId, + ) -> tide::Result<()> { let mut worktree_ids = Vec::new(); let mut state = self.state.write().await; if let Some(connection) = state.connections.remove(&connection_id) { + worktree_ids = connection.worktrees.into_iter().collect(); + for channel_id in connection.channels { if let Some(channel) = state.channels.get_mut(&channel_id) { channel.connection_ids.remove(&connection_id); } } - for worktree_id in connection.worktrees { - if let Some(worktree) = state.worktrees.get_mut(&worktree_id) { - if worktree.host_connection_id == connection_id { - worktree_ids.push(worktree_id); - } else if let Some(share_state) = worktree.share.as_mut() { - if let Some(replica_id) = - share_state.guest_connection_ids.remove(&connection_id) - { - share_state.active_replica_ids.remove(&replica_id); - worktree_ids.push(worktree_id); - } - } - } - } let user_connections = state .connections_by_user_id @@ -266,7 +242,12 @@ impl Server { state.connections_by_user_id.remove(&connection.user_id); } } - worktree_ids + + for worktree_id in worktree_ids { + self.close_worktree(worktree_id, connection_id).await?; + } + + Ok(()) } async fn ping(self: Arc, request: TypedEnvelope) -> tide::Result<()> { @@ -279,7 +260,7 @@ impl Server { request: TypedEnvelope, ) -> tide::Result<()> { let receipt = request.receipt(); - let user_id = self + let host_user_id = self .state .read() .await @@ -289,7 +270,7 @@ impl Server { for github_login in request.payload.collaborator_logins { match self.app_state.db.create_user(&github_login, false).await { Ok(collaborator_user_id) => { - if collaborator_user_id != user_id { + if collaborator_user_id != host_user_id { collaborator_user_ids.push(collaborator_user_id); } } @@ -303,18 +284,24 @@ impl Server { } } - let mut state = self.state.write().await; - let worktree_id = state.add_worktree(Worktree { - 
host_connection_id: request.sender_id, - collaborator_user_ids: collaborator_user_ids.clone(), - root_name: request.payload.root_name, - share: None, - }); + let worktree_id; + let mut user_ids; + { + let mut state = self.state.write().await; + worktree_id = state.add_worktree(Worktree { + host_connection_id: request.sender_id, + collaborator_user_ids: collaborator_user_ids.clone(), + root_name: request.payload.root_name, + share: None, + }); + user_ids = collaborator_user_ids; + user_ids.push(host_user_id); + } self.peer .respond(receipt, proto::OpenWorktreeResponse { worktree_id }) .await?; - self.update_collaborators(&collaborator_user_ids).await?; + self.update_collaborators_for_users(&user_ids).await?; Ok(()) } @@ -323,6 +310,11 @@ impl Server { self: Arc, mut request: TypedEnvelope, ) -> tide::Result<()> { + let host_user_id = self + .state + .read() + .await + .user_id_for_connection(request.sender_id)?; let worktree = request .payload .worktree @@ -332,6 +324,7 @@ impl Server { .into_iter() .map(|entry| (entry.id, entry)) .collect(); + let mut state = self.state.write().await; if let Some(worktree) = state.worktrees.get_mut(&worktree.id) { worktree.share = Some(WorktreeShare { @@ -339,13 +332,15 @@ impl Server { active_replica_ids: Default::default(), entries, }); + + let mut user_ids = worktree.collaborator_user_ids.clone(); + user_ids.push(host_user_id); + + drop(state); self.peer .respond(request.receipt(), proto::ShareWorktreeResponse {}) .await?; - - let collaborator_user_ids = worktree.collaborator_user_ids.clone(); - drop(state); - self.update_collaborators(&collaborator_user_ids).await?; + self.update_collaborators_for_users(&user_ids).await?; } else { self.peer .respond_with_error( @@ -364,9 +359,14 @@ impl Server { request: TypedEnvelope, ) -> tide::Result<()> { let worktree_id = request.payload.worktree_id; + let host_user_id = self + .state + .read() + .await + .user_id_for_connection(request.sender_id)?; let connection_ids; - let 
collaborator_user_ids; + let mut user_ids; { let mut state = self.state.write().await; let worktree = state.write_worktree(worktree_id, request.sender_id)?; @@ -375,7 +375,8 @@ impl Server { } connection_ids = worktree.connection_ids(); - collaborator_user_ids = worktree.collaborator_user_ids.clone(); + user_ids = worktree.collaborator_user_ids.clone(); + user_ids.push(host_user_id); worktree.share.take(); for connection_id in &connection_ids { if let Some(connection) = state.connections.get_mut(connection_id) { @@ -389,7 +390,7 @@ impl Server { .send(conn_id, proto::UnshareWorktree { worktree_id }) }) .await?; - self.update_collaborators(&collaborator_user_ids).await?; + self.update_collaborators_for_users(&user_ids).await?; Ok(()) } @@ -407,7 +408,7 @@ impl Server { let response; let connection_ids; - let collaborator_user_ids; + let mut user_ids; let mut state = self.state.write().await; match state.join_worktree(request.sender_id, user_id, worktree_id) { Ok((peer_replica_id, worktree)) => { @@ -426,8 +427,6 @@ impl Server { }); } } - connection_ids = worktree.connection_ids(); - collaborator_user_ids = worktree.collaborator_user_ids.clone(); response = proto::JoinWorktreeResponse { worktree: Some(proto::Worktree { id: worktree_id, @@ -437,6 +436,11 @@ impl Server { replica_id: peer_replica_id as u32, peers, }; + + let host_connection_id = worktree.host_connection_id; + connection_ids = worktree.connection_ids(); + user_ids = worktree.collaborator_user_ids.clone(); + user_ids.push(state.user_id_for_connection(host_connection_id)?); } Err(error) => { self.peer @@ -465,55 +469,69 @@ impl Server { }) .await?; self.peer.respond(request.receipt(), response).await?; - self.update_collaborators(&collaborator_user_ids).await?; + self.update_collaborators_for_users(&user_ids).await?; Ok(()) } - async fn close_worktree( + async fn handle_close_worktree( self: Arc, request: TypedEnvelope, ) -> tide::Result<()> { - let worktree_id = request.payload.worktree_id; + 
self.close_worktree(request.payload.worktree_id, request.sender_id) + .await + } + + async fn close_worktree( + self: &Arc, + worktree_id: u64, + conn_id: ConnectionId, + ) -> tide::Result<()> { let connection_ids; + let mut user_ids; + let mut is_host = false; let mut is_guest = false; { let mut state = self.state.write().await; - let worktree = state.write_worktree(worktree_id, request.sender_id)?; + let worktree = state.write_worktree(worktree_id, conn_id)?; + let host_connection_id = worktree.host_connection_id; connection_ids = worktree.connection_ids(); + user_ids = worktree.collaborator_user_ids.clone(); - if worktree.host_connection_id == request.sender_id { + if worktree.host_connection_id == conn_id { is_host = true; state.remove_worktree(worktree_id); } else { let share = worktree.share_mut()?; - if let Some(replica_id) = share.guest_connection_ids.remove(&request.sender_id) { + if let Some(replica_id) = share.guest_connection_ids.remove(&conn_id) { is_guest = true; share.active_replica_ids.remove(&replica_id); } } + + user_ids.push(state.user_id_for_connection(host_connection_id)?); } if is_host { - broadcast(request.sender_id, connection_ids, |conn_id| { + broadcast(conn_id, connection_ids, |conn_id| { self.peer .send(conn_id, proto::UnshareWorktree { worktree_id }) }) .await?; } else if is_guest { - broadcast(request.sender_id, connection_ids, |conn_id| { + broadcast(conn_id, connection_ids, |conn_id| { self.peer.send( conn_id, proto::RemovePeer { worktree_id, - peer_id: request.sender_id.0, + peer_id: conn_id.0, }, ) }) .await? 
} - + self.update_collaborators_for_users(&user_ids).await?; Ok(()) } @@ -694,7 +712,10 @@ impl Server { Ok(()) } - async fn update_collaborators(self: &Arc, user_ids: &[UserId]) -> tide::Result<()> { + async fn update_collaborators_for_users<'a>( + self: &Arc, + user_ids: impl IntoIterator, + ) -> tide::Result<()> { let mut send_futures = Vec::new(); let state = self.state.read().await; @@ -730,15 +751,8 @@ impl Server { }); } - let connection_ids = self - .state - .read() - .await - .user_connection_ids(*user_id) - .collect::>(); - let collaborators = collaborators.into_values().collect::>(); - for connection_id in connection_ids { + for connection_id in state.user_connection_ids(*user_id) { send_futures.push(self.peer.send( connection_id, proto::UpdateCollaborators { @@ -748,6 +762,7 @@ impl Server { } } + drop(state); futures::future::try_join_all(send_futures).await?; Ok(()) @@ -1052,10 +1067,6 @@ impl ServerState { .copied() } - fn is_online(&self, user_id: UserId) -> bool { - self.connections_by_user_id.contains_key(&user_id) - } - // Add the given connection as a guest of the given worktree fn join_worktree( &mut self, From ae9fb6531547d235cb17b69482b4b95d8059356c Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 20 Sep 2021 12:52:13 +0200 Subject: [PATCH 08/43] Fix integration tests --- server/src/rpc.rs | 16 +++++++++------- zed/src/presence.rs | 46 ++++++++++++++++++++++----------------------- 2 files changed, 32 insertions(+), 30 deletions(-) diff --git a/server/src/rpc.rs b/server/src/rpc.rs index 86b9b96caee82..e2cf625857a8e 100644 --- a/server/src/rpc.rs +++ b/server/src/rpc.rs @@ -243,6 +243,7 @@ impl Server { } } + drop(state); for worktree_id in worktree_ids { self.close_worktree(worktree_id, connection_id).await?; } @@ -455,6 +456,7 @@ impl Server { } } + drop(state); broadcast(request.sender_id, connection_ids, |conn_id| { self.peer.send( conn_id, @@ -485,7 +487,7 @@ impl Server { async fn close_worktree( self: &Arc, worktree_id: u64, 
- conn_id: ConnectionId, + sender_conn_id: ConnectionId, ) -> tide::Result<()> { let connection_ids; let mut user_ids; @@ -494,17 +496,17 @@ impl Server { let mut is_guest = false; { let mut state = self.state.write().await; - let worktree = state.write_worktree(worktree_id, conn_id)?; + let worktree = state.write_worktree(worktree_id, sender_conn_id)?; let host_connection_id = worktree.host_connection_id; connection_ids = worktree.connection_ids(); user_ids = worktree.collaborator_user_ids.clone(); - if worktree.host_connection_id == conn_id { + if worktree.host_connection_id == sender_conn_id { is_host = true; state.remove_worktree(worktree_id); } else { let share = worktree.share_mut()?; - if let Some(replica_id) = share.guest_connection_ids.remove(&conn_id) { + if let Some(replica_id) = share.guest_connection_ids.remove(&sender_conn_id) { is_guest = true; share.active_replica_ids.remove(&replica_id); } @@ -514,18 +516,18 @@ impl Server { } if is_host { - broadcast(conn_id, connection_ids, |conn_id| { + broadcast(sender_conn_id, connection_ids, |conn_id| { self.peer .send(conn_id, proto::UnshareWorktree { worktree_id }) }) .await?; } else if is_guest { - broadcast(conn_id, connection_ids, |conn_id| { + broadcast(sender_conn_id, connection_ids, |conn_id| { self.peer.send( conn_id, proto::RemovePeer { worktree_id, - peer_id: conn_id.0, + peer_id: sender_conn_id.0, }, ) }) diff --git a/zed/src/presence.rs b/zed/src/presence.rs index 0dc6a2a3ed2d4..356baa22b4d87 100644 --- a/zed/src/presence.rs +++ b/zed/src/presence.rs @@ -68,31 +68,31 @@ impl Presence { let rpc = self.rpc.clone(); let user_store = self.user_store.clone(); async move { - let response = rpc.request(proto::GetCollaborators {}).await?; - let mut user_ids = HashSet::new(); - for collaborator in &response.collaborators { - user_ids.insert(collaborator.user_id); - user_ids.extend( - collaborator - .worktrees - .iter() - .flat_map(|w| &w.participants) - .copied(), - ); - } - user_store - 
.load_users(user_ids.into_iter().collect()) - .await?; + // let response = rpc.request(proto::GetCollaborators {}).await?; + // let mut user_ids = HashSet::new(); + // for collaborator in &response.collaborators { + // user_ids.insert(collaborator.user_id); + // user_ids.extend( + // collaborator + // .worktrees + // .iter() + // .flat_map(|w| &w.participants) + // .copied(), + // ); + // } + // user_store + // .load_users(user_ids.into_iter().collect()) + // .await?; - let mut collaborators = Vec::new(); - for collaborator in response.collaborators { - collaborators.push(Collaborator::from_proto(collaborator, &user_store).await?); - } + // let mut collaborators = Vec::new(); + // for collaborator in response.collaborators { + // collaborators.push(Collaborator::from_proto(collaborator, &user_store).await?); + // } - this.update(&mut cx, |this, cx| { - this.collaborators = collaborators; - cx.notify(); - }); + // this.update(&mut cx, |this, cx| { + // this.collaborators = collaborators; + // cx.notify(); + // }); Ok(()) } From 6f2c8ffb00ed807eb4b51170bb8fe17096e43bc7 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 20 Sep 2021 14:14:07 +0200 Subject: [PATCH 09/43] Introduce a `Client::subscribe` method that doesn't need an entity --- zed/src/channel.rs | 2 +- zed/src/rpc.rs | 42 +++++++++++++++++++++++++++++++++++++++++- zed/src/worktree.rs | 22 +++++++++++----------- 3 files changed, 53 insertions(+), 13 deletions(-) diff --git a/zed/src/channel.rs b/zed/src/channel.rs index c43cf2e6f7b28..2eb904915c01d 100644 --- a/zed/src/channel.rs +++ b/zed/src/channel.rs @@ -190,7 +190,7 @@ impl Channel { rpc: Arc, cx: &mut ModelContext, ) -> Self { - let _subscription = rpc.subscribe_from_model(details.id, cx, Self::handle_message_sent); + let _subscription = rpc.subscribe_to_entity(details.id, cx, Self::handle_message_sent); { let user_store = user_store.clone(); diff --git a/zed/src/rpc.rs b/zed/src/rpc.rs index fe1dde4ffba2d..6a81bf3c4a28d 100644 --- 
a/zed/src/rpc.rs +++ b/zed/src/rpc.rs @@ -230,7 +230,47 @@ impl Client { } } - pub fn subscribe_from_model( + pub fn subscribe(self: &Arc, cx: ModelContext, mut handler: F) -> Subscription + where + T: EnvelopedMessage, + M: Entity, + F: 'static + + Send + + Sync + + FnMut(&mut M, TypedEnvelope, Arc, &mut ModelContext) -> Result<()>, + { + let subscription_id = (TypeId::of::(), Default::default()); + let client = self.clone(); + let mut state = self.state.write(); + let model = cx.handle().downgrade(); + let prev_extractor = state + .entity_id_extractors + .insert(subscription_id.0, Box::new(|_| Default::default())); + if prev_extractor.is_some() { + panic!("registered a handler for the same entity twice") + } + + state.model_handlers.insert( + subscription_id, + Box::new(move |envelope, cx| { + if let Some(model) = model.upgrade(cx) { + let envelope = envelope.into_any().downcast::>().unwrap(); + model.update(cx, |model, cx| { + if let Err(error) = handler(model, *envelope, client.clone(), cx) { + log::error!("error handling message: {}", error) + } + }); + } + }), + ); + + Subscription { + client: Arc::downgrade(self), + id: subscription_id, + } + } + + pub fn subscribe_to_entity( self: &Arc, remote_id: u64, cx: &mut ModelContext, diff --git a/zed/src/worktree.rs b/zed/src/worktree.rs index 60d453b4083e9..93be506134121 100644 --- a/zed/src/worktree.rs +++ b/zed/src/worktree.rs @@ -211,11 +211,11 @@ impl Worktree { } let _subscriptions = vec![ - rpc.subscribe_from_model(remote_id, cx, Self::handle_add_peer), - rpc.subscribe_from_model(remote_id, cx, Self::handle_remove_peer), - rpc.subscribe_from_model(remote_id, cx, Self::handle_update), - rpc.subscribe_from_model(remote_id, cx, Self::handle_update_buffer), - rpc.subscribe_from_model(remote_id, cx, Self::handle_buffer_saved), + rpc.subscribe_to_entity(remote_id, cx, Self::handle_add_peer), + rpc.subscribe_to_entity(remote_id, cx, Self::handle_remove_peer), + rpc.subscribe_to_entity(remote_id, cx, 
Self::handle_update), + rpc.subscribe_to_entity(remote_id, cx, Self::handle_update_buffer), + rpc.subscribe_to_entity(remote_id, cx, Self::handle_buffer_saved), ]; Worktree::Remote(RemoteWorktree { @@ -1070,12 +1070,12 @@ impl LocalWorktree { this.update(&mut cx, |worktree, cx| { let _subscriptions = vec![ - rpc.subscribe_from_model(remote_id, cx, Worktree::handle_add_peer), - rpc.subscribe_from_model(remote_id, cx, Worktree::handle_remove_peer), - rpc.subscribe_from_model(remote_id, cx, Worktree::handle_open_buffer), - rpc.subscribe_from_model(remote_id, cx, Worktree::handle_close_buffer), - rpc.subscribe_from_model(remote_id, cx, Worktree::handle_update_buffer), - rpc.subscribe_from_model(remote_id, cx, Worktree::handle_save_buffer), + rpc.subscribe_to_entity(remote_id, cx, Worktree::handle_add_peer), + rpc.subscribe_to_entity(remote_id, cx, Worktree::handle_remove_peer), + rpc.subscribe_to_entity(remote_id, cx, Worktree::handle_open_buffer), + rpc.subscribe_to_entity(remote_id, cx, Worktree::handle_close_buffer), + rpc.subscribe_to_entity(remote_id, cx, Worktree::handle_update_buffer), + rpc.subscribe_to_entity(remote_id, cx, Worktree::handle_save_buffer), ]; let worktree = worktree.as_local_mut().unwrap(); From 32111092bd575864dcb06e8d1af896ce53b2105c Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 20 Sep 2021 14:40:06 +0200 Subject: [PATCH 10/43] Turn `UserStore` into a model --- server/src/rpc.rs | 44 +++++++------- zed/src/channel.rs | 45 ++++++++++----- zed/src/lib.rs | 4 +- zed/src/people_panel.rs | 11 ++-- zed/src/presence.rs | 43 +++++++------- zed/src/test.rs | 3 +- zed/src/user.rs | 124 +++++++++++++++++++++------------------- zed/src/workspace.rs | 7 ++- 8 files changed, 150 insertions(+), 131 deletions(-) diff --git a/server/src/rpc.rs b/server/src/rpc.rs index e2cf625857a8e..d6bb42d256f89 100644 --- a/server/src/rpc.rs +++ b/server/src/rpc.rs @@ -1289,7 +1289,7 @@ mod tests { github, AppState, Config, }; use 
async_std::{sync::RwLockReadGuard, task}; - use gpui::TestAppContext; + use gpui::{ModelHandle, TestAppContext}; use parking_lot::Mutex; use postage::{mpsc, watch}; use serde_json::json; @@ -1780,24 +1780,24 @@ mod tests { // Create an org that includes these 2 users. let db = &server.app_state.db; let org_id = db.create_org("Test Org", "test-org").await.unwrap(); - db.add_org_member(org_id, current_user_id(&user_store_a), false) + db.add_org_member(org_id, current_user_id(&user_store_a, &cx_a), false) .await .unwrap(); - db.add_org_member(org_id, current_user_id(&user_store_b), false) + db.add_org_member(org_id, current_user_id(&user_store_b, &cx_b), false) .await .unwrap(); // Create a channel that includes all the users. let channel_id = db.create_org_channel(org_id, "test-channel").await.unwrap(); - db.add_channel_member(channel_id, current_user_id(&user_store_a), false) + db.add_channel_member(channel_id, current_user_id(&user_store_a, &cx_a), false) .await .unwrap(); - db.add_channel_member(channel_id, current_user_id(&user_store_b), false) + db.add_channel_member(channel_id, current_user_id(&user_store_b, &cx_b), false) .await .unwrap(); db.create_channel_message( channel_id, - current_user_id(&user_store_b), + current_user_id(&user_store_b, &cx_b), "hello A, it's B.", OffsetDateTime::now_utc(), 1, @@ -1912,10 +1912,10 @@ mod tests { let db = &server.app_state.db; let org_id = db.create_org("Test Org", "test-org").await.unwrap(); let channel_id = db.create_org_channel(org_id, "test-channel").await.unwrap(); - db.add_org_member(org_id, current_user_id(&user_store_a), false) + db.add_org_member(org_id, current_user_id(&user_store_a, &cx_a), false) .await .unwrap(); - db.add_channel_member(channel_id, current_user_id(&user_store_a), false) + db.add_channel_member(channel_id, current_user_id(&user_store_a, &cx_a), false) .await .unwrap(); @@ -1964,7 +1964,6 @@ mod tests { #[gpui::test] async fn test_chat_reconnection(mut cx_a: TestAppContext, mut cx_b: 
TestAppContext) { cx_a.foreground().forbid_parking(); - let http = FakeHttpClient::new(|_| async move { Ok(surf::http::Response::new(404)) }); // Connect to a server as 2 clients. let mut server = TestServer::start().await; @@ -1975,24 +1974,24 @@ mod tests { // Create an org that includes these 2 users. let db = &server.app_state.db; let org_id = db.create_org("Test Org", "test-org").await.unwrap(); - db.add_org_member(org_id, current_user_id(&user_store_a), false) + db.add_org_member(org_id, current_user_id(&user_store_a, &cx_a), false) .await .unwrap(); - db.add_org_member(org_id, current_user_id(&user_store_b), false) + db.add_org_member(org_id, current_user_id(&user_store_b, &cx_b), false) .await .unwrap(); // Create a channel that includes all the users. let channel_id = db.create_org_channel(org_id, "test-channel").await.unwrap(); - db.add_channel_member(channel_id, current_user_id(&user_store_a), false) + db.add_channel_member(channel_id, current_user_id(&user_store_a, &cx_a), false) .await .unwrap(); - db.add_channel_member(channel_id, current_user_id(&user_store_b), false) + db.add_channel_member(channel_id, current_user_id(&user_store_b, &cx_b), false) .await .unwrap(); db.create_channel_message( channel_id, - current_user_id(&user_store_b), + current_user_id(&user_store_b, &cx_b), "hello A, it's B.", OffsetDateTime::now_utc(), 2, @@ -2000,8 +1999,6 @@ mod tests { .await .unwrap(); - let user_store_a = - UserStore::new(client_a.clone(), http.clone(), cx_a.background().as_ref()); let channels_a = cx_a.add_model(|cx| ChannelList::new(user_store_a, client_a, cx)); channels_a .condition(&mut cx_a, |list, _| list.available_channels().is_some()) @@ -2054,7 +2051,7 @@ mod tests { // Disconnect client B, ensuring we can still access its cached channel data. 
server.forbid_connections(); - server.disconnect_client(current_user_id(&user_store_b)); + server.disconnect_client(current_user_id(&user_store_b, &cx_b)); while !matches!( status_b.recv().await, Some(rpc::Status::ReconnectionError { .. }) @@ -2206,7 +2203,7 @@ mod tests { &mut self, cx: &mut TestAppContext, name: &str, - ) -> (Arc, Arc) { + ) -> (Arc, ModelHandle) { let user_id = self.app_state.db.create_user(name, false).await.unwrap(); let client_name = name.to_string(); let mut client = Client::new(); @@ -2254,8 +2251,9 @@ mod tests { .await .unwrap(); - let user_store = UserStore::new(client.clone(), http, &cx.background()); - let mut authed_user = user_store.watch_current_user(); + let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http, cx)); + let mut authed_user = + user_store.read_with(cx, |user_store, _| user_store.watch_current_user()); while authed_user.recv().await.unwrap().is_none() {} (client, user_store) @@ -2314,8 +2312,10 @@ mod tests { } } - fn current_user_id(user_store: &Arc) -> UserId { - UserId::from_proto(user_store.current_user().unwrap().id) + fn current_user_id(user_store: &ModelHandle, cx: &TestAppContext) -> UserId { + UserId::from_proto( + user_store.read_with(cx, |user_store, _| user_store.current_user().unwrap().id), + ) } fn channel_messages(channel: &Channel) -> Vec<(String, String, bool)> { diff --git a/zed/src/channel.rs b/zed/src/channel.rs index 2eb904915c01d..f042a2e5088ec 100644 --- a/zed/src/channel.rs +++ b/zed/src/channel.rs @@ -6,7 +6,7 @@ use crate::{ use anyhow::{anyhow, Context, Result}; use gpui::{ sum_tree::{self, Bias, SumTree}, - Entity, ModelContext, ModelHandle, MutableAppContext, Task, WeakModelHandle, + AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task, WeakModelHandle, }; use postage::prelude::Stream; use rand::prelude::*; @@ -26,7 +26,7 @@ pub struct ChannelList { available_channels: Option>, channels: HashMap>, rpc: Arc, - user_store: Arc, + user_store: ModelHandle, 
_task: Task>, } @@ -41,7 +41,7 @@ pub struct Channel { messages: SumTree, loaded_all_messages: bool, next_pending_message_id: usize, - user_store: Arc, + user_store: ModelHandle, rpc: Arc, rng: StdRng, _subscription: rpc::Subscription, @@ -87,7 +87,7 @@ impl Entity for ChannelList { impl ChannelList { pub fn new( - user_store: Arc, + user_store: ModelHandle, rpc: Arc, cx: &mut ModelContext, ) -> Self { @@ -186,7 +186,7 @@ impl Entity for Channel { impl Channel { pub fn new( details: ChannelDetails, - user_store: Arc, + user_store: ModelHandle, rpc: Arc, cx: &mut ModelContext, ) -> Self { @@ -199,7 +199,8 @@ impl Channel { cx.spawn(|channel, mut cx| { async move { let response = rpc.request(proto::JoinChannel { channel_id }).await?; - let messages = messages_from_proto(response.messages, &user_store).await?; + let messages = + messages_from_proto(response.messages, &user_store, &mut cx).await?; let loaded_all_messages = response.done; channel.update(&mut cx, |channel, cx| { @@ -241,6 +242,7 @@ impl Channel { let current_user = self .user_store + .read(cx) .current_user() .ok_or_else(|| anyhow!("current_user is not present"))?; @@ -272,6 +274,7 @@ impl Channel { let message = ChannelMessage::from_proto( response.message.ok_or_else(|| anyhow!("invalid message"))?, &user_store, + &mut cx, ) .await?; this.update(&mut cx, |this, cx| { @@ -301,7 +304,8 @@ impl Channel { }) .await?; let loaded_all_messages = response.done; - let messages = messages_from_proto(response.messages, &user_store).await?; + let messages = + messages_from_proto(response.messages, &user_store, &mut cx).await?; this.update(&mut cx, |this, cx| { this.loaded_all_messages = loaded_all_messages; this.insert_messages(messages, cx); @@ -324,7 +328,7 @@ impl Channel { cx.spawn(|this, mut cx| { async move { let response = rpc.request(proto::JoinChannel { channel_id }).await?; - let messages = messages_from_proto(response.messages, &user_store).await?; + let messages = messages_from_proto(response.messages, 
&user_store, &mut cx).await?; let loaded_all_messages = response.done; let pending_messages = this.update(&mut cx, |this, cx| { @@ -359,6 +363,7 @@ impl Channel { let message = ChannelMessage::from_proto( response.message.ok_or_else(|| anyhow!("invalid message"))?, &user_store, + &mut cx, ) .await?; this.update(&mut cx, |this, cx| { @@ -413,7 +418,7 @@ impl Channel { cx.spawn(|this, mut cx| { async move { - let message = ChannelMessage::from_proto(message, &user_store).await?; + let message = ChannelMessage::from_proto(message, &user_store, &mut cx).await?; this.update(&mut cx, |this, cx| { this.insert_messages(SumTree::from_item(message, &()), cx) }); @@ -486,7 +491,8 @@ impl Channel { async fn messages_from_proto( proto_messages: Vec, - user_store: &UserStore, + user_store: &ModelHandle, + cx: &mut AsyncAppContext, ) -> Result> { let unique_user_ids = proto_messages .iter() @@ -494,11 +500,15 @@ async fn messages_from_proto( .collect::>() .into_iter() .collect(); - user_store.load_users(unique_user_ids).await?; + user_store + .update(cx, |user_store, cx| { + user_store.load_users(unique_user_ids, cx) + }) + .await?; let mut messages = Vec::with_capacity(proto_messages.len()); for message in proto_messages { - messages.push(ChannelMessage::from_proto(message, &user_store).await?); + messages.push(ChannelMessage::from_proto(message, user_store, cx).await?); } let mut result = SumTree::new(); result.extend(messages, &()); @@ -517,9 +527,14 @@ impl From for ChannelDetails { impl ChannelMessage { pub async fn from_proto( message: proto::ChannelMessage, - user_store: &UserStore, + user_store: &ModelHandle, + cx: &mut AsyncAppContext, ) -> Result { - let sender = user_store.fetch_user(message.sender_id).await?; + let sender = user_store + .update(cx, |user_store, cx| { + user_store.fetch_user(message.sender_id, cx) + }) + .await?; Ok(ChannelMessage { id: ChannelMessageId::Saved(message.id), body: message.body, @@ -595,7 +610,7 @@ mod tests { let mut client = 
Client::new(); let http_client = FakeHttpClient::new(|_| async move { Ok(Response::new(404)) }); let server = FakeServer::for_client(user_id, &mut client, &cx).await; - let user_store = UserStore::new(client.clone(), http_client, cx.background().as_ref()); + let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx)); let channel_list = cx.add_model(|cx| ChannelList::new(user_store, client.clone(), cx)); channel_list.read_with(&cx, |list, _| assert_eq!(list.available_channels(), None)); diff --git a/zed/src/lib.rs b/zed/src/lib.rs index e07d04a5a4af7..397eed486b33a 100644 --- a/zed/src/lib.rs +++ b/zed/src/lib.rs @@ -28,7 +28,6 @@ use channel::ChannelList; use gpui::{action, keymap::Binding, ModelHandle}; use parking_lot::Mutex; use postage::watch; -use presence::Presence; use std::sync::Arc; pub use settings::Settings; @@ -46,10 +45,9 @@ pub struct AppState { pub languages: Arc, pub themes: Arc, pub rpc: Arc, - pub user_store: Arc, + pub user_store: ModelHandle, pub fs: Arc, pub channel_list: ModelHandle, - pub presence: ModelHandle, } pub fn init(app_state: &Arc, cx: &mut gpui::MutableAppContext) { diff --git a/zed/src/people_panel.rs b/zed/src/people_panel.rs index 7e6d67548966e..246a5e04916cb 100644 --- a/zed/src/people_panel.rs +++ b/zed/src/people_panel.rs @@ -1,16 +1,17 @@ -use crate::presence::Presence; use gpui::{ elements::Empty, Element, ElementBox, Entity, ModelHandle, RenderContext, View, ViewContext, }; +use crate::user::UserStore; + pub struct PeoplePanel { - presence: ModelHandle, + user_store: ModelHandle, } impl PeoplePanel { - pub fn new(presence: ModelHandle, cx: &mut ViewContext) -> Self { - cx.observe(&presence, |_, _, cx| cx.notify()); - Self { presence } + pub fn new(user_store: ModelHandle, cx: &mut ViewContext) -> Self { + cx.observe(&user_store, |_, _, cx| cx.notify()); + Self { user_store } } } diff --git a/zed/src/presence.rs b/zed/src/presence.rs index 356baa22b4d87..2cdde21e2fbb0 100644 --- 
a/zed/src/presence.rs +++ b/zed/src/presence.rs @@ -106,24 +106,25 @@ impl Entity for Presence { type Event = Event; } -impl Collaborator { - async fn from_proto( - collaborator: proto::Collaborator, - user_store: &Arc, - ) -> Result { - let user = user_store.fetch_user(collaborator.user_id).await?; - let mut worktrees = Vec::new(); - for worktree in collaborator.worktrees { - let mut participants = Vec::new(); - for participant_id in worktree.participants { - participants.push(user_store.fetch_user(participant_id).await?); - } - worktrees.push(WorktreeMetadata { - root_name: worktree.root_name, - is_shared: worktree.is_shared, - participants, - }); - } - Ok(Self { user, worktrees }) - } -} +// impl Collaborator { +// async fn from_proto( +// collaborator: proto::Collaborator, +// user_store: &Arc, +// cx: &mut AsyncAppContext, +// ) -> Result { +// let user = user_store.fetch_user(collaborator.user_id).await?; +// let mut worktrees = Vec::new(); +// for worktree in collaborator.worktrees { +// let mut participants = Vec::new(); +// for participant_id in worktree.participants { +// participants.push(user_store.fetch_user(participant_id).await?); +// } +// worktrees.push(WorktreeMetadata { +// root_name: worktree.root_name, +// is_shared: worktree.is_shared, +// participants, +// }); +// } +// Ok(Self { user, worktrees }) +// } +// } diff --git a/zed/src/test.rs b/zed/src/test.rs index 77b284b24580a..9e02bb74db332 100644 --- a/zed/src/test.rs +++ b/zed/src/test.rs @@ -169,14 +169,13 @@ pub fn test_app_state(cx: &mut MutableAppContext) -> Arc { let themes = ThemeRegistry::new(Assets, cx.font_cache().clone()); let rpc = rpc::Client::new(); let http = FakeHttpClient::new(|_| async move { Ok(ServerResponse::new(404)) }); - let user_store = UserStore::new(rpc.clone(), http, cx.background()); + let user_store = cx.add_model(|cx| UserStore::new(rpc.clone(), http, cx)); Arc::new(AppState { settings_tx: Arc::new(Mutex::new(settings_tx)), settings, themes, languages: 
languages.clone(), channel_list: cx.add_model(|cx| ChannelList::new(user_store.clone(), rpc.clone(), cx)), - presence: cx.add_model(|cx| Presence::new(user_store.clone(), rpc.clone(), cx)), rpc, user_store, fs: Arc::new(RealFs), diff --git a/zed/src/user.rs b/zed/src/user.rs index 54e84d756ff81..8a050d516420a 100644 --- a/zed/src/user.rs +++ b/zed/src/user.rs @@ -5,13 +5,9 @@ use crate::{ }; use anyhow::{anyhow, Context, Result}; use futures::future; -use gpui::{executor, ImageData, Task}; -use parking_lot::Mutex; -use postage::{oneshot, prelude::Stream, sink::Sink, watch}; -use std::{ - collections::HashMap, - sync::{Arc, Weak}, -}; +use gpui::{Entity, ImageData, ModelContext, Task}; +use postage::{prelude::Stream, sink::Sink, watch}; +use std::{collections::HashMap, sync::Arc}; use zrpc::proto; #[derive(Debug)] @@ -22,41 +18,38 @@ pub struct User { } pub struct UserStore { - users: Mutex>>, + users: HashMap>, current_user: watch::Receiver>>, rpc: Arc, http: Arc, _maintain_current_user: Task<()>, } +pub enum Event {} + +impl Entity for UserStore { + type Event = Event; +} + impl UserStore { - pub fn new( - rpc: Arc, - http: Arc, - executor: &executor::Background, - ) -> Arc { + pub fn new(rpc: Arc, http: Arc, cx: &mut ModelContext) -> Self { let (mut current_user_tx, current_user_rx) = watch::channel(); - let (mut this_tx, mut this_rx) = oneshot::channel::>(); - let this = Arc::new(Self { + Self { users: Default::default(), current_user: current_user_rx, rpc: rpc.clone(), http, - _maintain_current_user: executor.spawn(async move { - let this = if let Some(this) = this_rx.recv().await { - this - } else { - return; - }; + _maintain_current_user: cx.spawn_weak(|this, mut cx| async move { let mut status = rpc.status(); while let Some(status) = status.recv().await { match status { Status::Connected { .. 
} => { - if let Some((this, user_id)) = this.upgrade().zip(rpc.user_id()) { - current_user_tx - .send(this.fetch_user(user_id).log_err().await) - .await - .ok(); + if let Some((this, user_id)) = this.upgrade(&cx).zip(rpc.user_id()) { + let user = this + .update(&mut cx, |this, cx| this.fetch_user(user_id, cx)) + .log_err() + .await; + current_user_tx.send(user).await.ok(); } } Status::SignedOut => { @@ -66,49 +59,60 @@ impl UserStore { } } }), - }); - let weak = Arc::downgrade(&this); - executor - .spawn(async move { this_tx.send(weak).await }) - .detach(); - this + } } - pub async fn load_users(&self, mut user_ids: Vec) -> Result<()> { - { - let users = self.users.lock(); - user_ids.retain(|id| !users.contains_key(id)); - } + pub fn load_users( + &mut self, + mut user_ids: Vec, + cx: &mut ModelContext, + ) -> Task> { + let rpc = self.rpc.clone(); + let http = self.http.clone(); + user_ids.retain(|id| !self.users.contains_key(id)); + cx.spawn_weak(|this, mut cx| async move { + if !user_ids.is_empty() { + let response = rpc.request(proto::GetUsers { user_ids }).await?; + let new_users = future::join_all( + response + .users + .into_iter() + .map(|user| User::new(user, http.as_ref())), + ) + .await; - if !user_ids.is_empty() { - let response = self.rpc.request(proto::GetUsers { user_ids }).await?; - let new_users = future::join_all( - response - .users - .into_iter() - .map(|user| User::new(user, self.http.as_ref())), - ) - .await; - let mut users = self.users.lock(); - for user in new_users { - users.insert(user.id, Arc::new(user)); + if let Some(this) = this.upgrade(&cx) { + this.update(&mut cx, |this, _| { + for user in new_users { + this.users.insert(user.id, Arc::new(user)); + } + }); + } } - } - Ok(()) + Ok(()) + }) } - pub async fn fetch_user(&self, user_id: u64) -> Result> { - if let Some(user) = self.users.lock().get(&user_id).cloned() { - return Ok(user); + pub fn fetch_user( + &mut self, + user_id: u64, + cx: &mut ModelContext, + ) -> Task>> { + if let 
Some(user) = self.users.get(&user_id).cloned() { + return cx.spawn_weak(|_, _| async move { Ok(user) }); } - self.load_users(vec![user_id]).await?; - self.users - .lock() - .get(&user_id) - .cloned() - .ok_or_else(|| anyhow!("server responded with no users")) + let load_users = self.load_users(vec![user_id], cx); + cx.spawn(|this, mut cx| async move { + load_users.await?; + this.update(&mut cx, |this, _| { + this.users + .get(&user_id) + .cloned() + .ok_or_else(|| anyhow!("server responded with no users")) + }) + }) } pub fn current_user(&self) -> Option> { diff --git a/zed/src/workspace.rs b/zed/src/workspace.rs index bcdbe17a545c4..3182fc3aab80b 100644 --- a/zed/src/workspace.rs +++ b/zed/src/workspace.rs @@ -333,7 +333,7 @@ pub struct Workspace { pub settings: watch::Receiver, languages: Arc, rpc: Arc, - user_store: Arc, + user_store: ModelHandle, fs: Arc, modal: Option, center: PaneGroup, @@ -381,11 +381,11 @@ impl Workspace { ); right_sidebar.add_item( "icons/user-16.svg", - cx.add_view(|cx| PeoplePanel::new(app_state.presence.clone(), cx)) + cx.add_view(|cx| PeoplePanel::new(app_state.user_store.clone(), cx)) .into(), ); - let mut current_user = app_state.user_store.watch_current_user().clone(); + let mut current_user = app_state.user_store.read(cx).watch_current_user().clone(); let mut connection_status = app_state.rpc.status().clone(); let _observe_current_user = cx.spawn_weak(|this, mut cx| async move { current_user.recv().await; @@ -965,6 +965,7 @@ impl Workspace { let theme = &self.settings.borrow().theme; let avatar = if let Some(avatar) = self .user_store + .read(cx) .current_user() .and_then(|user| user.avatar.clone()) { From 3d4dbf31407a2ac530f7e8219031482a55169ade Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 20 Sep 2021 15:05:41 +0200 Subject: [PATCH 11/43] Maintain collaborators in `UserStore` --- server/src/rpc.rs | 77 ++++++++++---------------- zed/src/lib.rs | 1 - zed/src/main.rs | 7 +-- zed/src/presence.rs | 130 
-------------------------------------------- zed/src/rpc.rs | 6 +- zed/src/test.rs | 1 - zed/src/user.rs | 100 ++++++++++++++++++++++++++++++++-- 7 files changed, 131 insertions(+), 191 deletions(-) delete mode 100644 zed/src/presence.rs diff --git a/server/src/rpc.rs b/server/src/rpc.rs index d6bb42d256f89..b0d50d7272934 100644 --- a/server/src/rpc.rs +++ b/server/src/rpc.rs @@ -267,13 +267,12 @@ impl Server { .await .user_id_for_connection(request.sender_id)?; - let mut collaborator_user_ids = Vec::new(); + let mut collaborator_user_ids = HashSet::new(); + collaborator_user_ids.insert(host_user_id); for github_login in request.payload.collaborator_logins { match self.app_state.db.create_user(&github_login, false).await { Ok(collaborator_user_id) => { - if collaborator_user_id != host_user_id { - collaborator_user_ids.push(collaborator_user_id); - } + collaborator_user_ids.insert(collaborator_user_id); } Err(err) => { let message = err.to_string(); @@ -285,24 +284,19 @@ impl Server { } } - let worktree_id; - let mut user_ids; - { - let mut state = self.state.write().await; - worktree_id = state.add_worktree(Worktree { - host_connection_id: request.sender_id, - collaborator_user_ids: collaborator_user_ids.clone(), - root_name: request.payload.root_name, - share: None, - }); - user_ids = collaborator_user_ids; - user_ids.push(host_user_id); - } + let collaborator_user_ids = collaborator_user_ids.into_iter().collect::>(); + let worktree_id = self.state.write().await.add_worktree(Worktree { + host_connection_id: request.sender_id, + collaborator_user_ids: collaborator_user_ids.clone(), + root_name: request.payload.root_name, + share: None, + }); self.peer .respond(receipt, proto::OpenWorktreeResponse { worktree_id }) .await?; - self.update_collaborators_for_users(&user_ids).await?; + self.update_collaborators_for_users(&collaborator_user_ids) + .await?; Ok(()) } @@ -311,11 +305,6 @@ impl Server { self: Arc, mut request: TypedEnvelope, ) -> tide::Result<()> { - let 
host_user_id = self - .state - .read() - .await - .user_id_for_connection(request.sender_id)?; let worktree = request .payload .worktree @@ -333,15 +322,14 @@ impl Server { active_replica_ids: Default::default(), entries, }); - - let mut user_ids = worktree.collaborator_user_ids.clone(); - user_ids.push(host_user_id); + let collaborator_user_ids = worktree.collaborator_user_ids.clone(); drop(state); self.peer .respond(request.receipt(), proto::ShareWorktreeResponse {}) .await?; - self.update_collaborators_for_users(&user_ids).await?; + self.update_collaborators_for_users(&collaborator_user_ids) + .await?; } else { self.peer .respond_with_error( @@ -360,14 +348,9 @@ impl Server { request: TypedEnvelope, ) -> tide::Result<()> { let worktree_id = request.payload.worktree_id; - let host_user_id = self - .state - .read() - .await - .user_id_for_connection(request.sender_id)?; let connection_ids; - let mut user_ids; + let collaborator_user_ids; { let mut state = self.state.write().await; let worktree = state.write_worktree(worktree_id, request.sender_id)?; @@ -376,8 +359,8 @@ impl Server { } connection_ids = worktree.connection_ids(); - user_ids = worktree.collaborator_user_ids.clone(); - user_ids.push(host_user_id); + collaborator_user_ids = worktree.collaborator_user_ids.clone(); + worktree.share.take(); for connection_id in &connection_ids { if let Some(connection) = state.connections.get_mut(connection_id) { @@ -391,7 +374,8 @@ impl Server { .send(conn_id, proto::UnshareWorktree { worktree_id }) }) .await?; - self.update_collaborators_for_users(&user_ids).await?; + self.update_collaborators_for_users(&collaborator_user_ids) + .await?; Ok(()) } @@ -409,7 +393,7 @@ impl Server { let response; let connection_ids; - let mut user_ids; + let collaborator_user_ids; let mut state = self.state.write().await; match state.join_worktree(request.sender_id, user_id, worktree_id) { Ok((peer_replica_id, worktree)) => { @@ -437,11 +421,8 @@ impl Server { replica_id: peer_replica_id 
as u32, peers, }; - - let host_connection_id = worktree.host_connection_id; connection_ids = worktree.connection_ids(); - user_ids = worktree.collaborator_user_ids.clone(); - user_ids.push(state.user_id_for_connection(host_connection_id)?); + collaborator_user_ids = worktree.collaborator_user_ids.clone(); } Err(error) => { self.peer @@ -471,7 +452,8 @@ impl Server { }) .await?; self.peer.respond(request.receipt(), response).await?; - self.update_collaborators_for_users(&user_ids).await?; + self.update_collaborators_for_users(&collaborator_user_ids) + .await?; Ok(()) } @@ -490,16 +472,14 @@ impl Server { sender_conn_id: ConnectionId, ) -> tide::Result<()> { let connection_ids; - let mut user_ids; - + let collaborator_user_ids; let mut is_host = false; let mut is_guest = false; { let mut state = self.state.write().await; let worktree = state.write_worktree(worktree_id, sender_conn_id)?; - let host_connection_id = worktree.host_connection_id; connection_ids = worktree.connection_ids(); - user_ids = worktree.collaborator_user_ids.clone(); + collaborator_user_ids = worktree.collaborator_user_ids.clone(); if worktree.host_connection_id == sender_conn_id { is_host = true; @@ -511,8 +491,6 @@ impl Server { share.active_replica_ids.remove(&replica_id); } } - - user_ids.push(state.user_id_for_connection(host_connection_id)?); } if is_host { @@ -533,7 +511,8 @@ impl Server { }) .await? 
} - self.update_collaborators_for_users(&user_ids).await?; + self.update_collaborators_for_users(&collaborator_user_ids) + .await?; Ok(()) } diff --git a/zed/src/lib.rs b/zed/src/lib.rs index 397eed486b33a..b7fa3f83d06b0 100644 --- a/zed/src/lib.rs +++ b/zed/src/lib.rs @@ -9,7 +9,6 @@ pub mod http; pub mod language; pub mod menus; pub mod people_panel; -pub mod presence; pub mod project_browser; pub mod rpc; pub mod settings; diff --git a/zed/src/main.rs b/zed/src/main.rs index 45aeb52dbcf66..c88b1465d1474 100644 --- a/zed/src/main.rs +++ b/zed/src/main.rs @@ -13,9 +13,7 @@ use zed::{ channel::ChannelList, chat_panel, editor, file_finder, fs::RealFs, - http, language, menus, - presence::Presence, - rpc, settings, theme_selector, + http, language, menus, rpc, settings, theme_selector, user::UserStore, workspace::{self, OpenNew, OpenParams, OpenPaths}, AppState, @@ -40,14 +38,13 @@ fn main() { app.run(move |cx| { let rpc = rpc::Client::new(); let http = http::client(); - let user_store = UserStore::new(rpc.clone(), http.clone(), cx.background()); + let user_store = cx.add_model(|cx| UserStore::new(rpc.clone(), http.clone(), cx)); let app_state = Arc::new(AppState { languages: languages.clone(), settings_tx: Arc::new(Mutex::new(settings_tx)), settings, themes, channel_list: cx.add_model(|cx| ChannelList::new(user_store.clone(), rpc.clone(), cx)), - presence: cx.add_model(|cx| Presence::new(user_store.clone(), rpc.clone(), cx)), rpc, user_store, fs: Arc::new(RealFs), diff --git a/zed/src/presence.rs b/zed/src/presence.rs deleted file mode 100644 index 2cdde21e2fbb0..0000000000000 --- a/zed/src/presence.rs +++ /dev/null @@ -1,130 +0,0 @@ -use crate::{ - rpc::Client, - user::{User, UserStore}, - util::TryFutureExt, -}; -use anyhow::Result; -use gpui::{Entity, ModelContext, Task}; -use postage::prelude::Stream; -use smol::future::FutureExt; -use std::{collections::HashSet, sync::Arc, time::Duration}; -use zrpc::proto; - -pub struct Presence { - collaborators: Vec, - 
user_store: Arc, - rpc: Arc, - _maintain_people: Task<()>, -} - -#[derive(Debug)] -struct Collaborator { - user: Arc, - worktrees: Vec, -} - -#[derive(Debug)] -struct WorktreeMetadata { - root_name: String, - is_shared: bool, - participants: Vec>, -} - -impl Presence { - pub fn new(user_store: Arc, rpc: Arc, cx: &mut ModelContext) -> Self { - let _maintain_collaborators = cx.spawn_weak(|this, mut cx| { - let user_store = user_store.clone(); - let foreground = cx.foreground(); - async move { - let mut current_user = user_store.watch_current_user(); - loop { - let timer = foreground.timer(Duration::from_secs(2)); - let next_current_user = async { - current_user.recv().await; - }; - - next_current_user.race(timer).await; - if current_user.borrow().is_some() { - if let Some(this) = cx.read(|cx| this.upgrade(cx)) { - this.update(&mut cx, |this, cx| this.refresh(cx)) - .log_err() - .await; - } - } - } - } - }); - - Self { - collaborators: Vec::new(), - user_store, - rpc, - _maintain_people: _maintain_collaborators, - } - } - - fn refresh(&self, cx: &mut ModelContext) -> Task> { - cx.spawn(|this, mut cx| { - let rpc = self.rpc.clone(); - let user_store = self.user_store.clone(); - async move { - // let response = rpc.request(proto::GetCollaborators {}).await?; - // let mut user_ids = HashSet::new(); - // for collaborator in &response.collaborators { - // user_ids.insert(collaborator.user_id); - // user_ids.extend( - // collaborator - // .worktrees - // .iter() - // .flat_map(|w| &w.participants) - // .copied(), - // ); - // } - // user_store - // .load_users(user_ids.into_iter().collect()) - // .await?; - - // let mut collaborators = Vec::new(); - // for collaborator in response.collaborators { - // collaborators.push(Collaborator::from_proto(collaborator, &user_store).await?); - // } - - // this.update(&mut cx, |this, cx| { - // this.collaborators = collaborators; - // cx.notify(); - // }); - - Ok(()) - } - }) - } -} - -pub enum Event {} - -impl Entity for Presence { - 
type Event = Event; -} - -// impl Collaborator { -// async fn from_proto( -// collaborator: proto::Collaborator, -// user_store: &Arc, -// cx: &mut AsyncAppContext, -// ) -> Result { -// let user = user_store.fetch_user(collaborator.user_id).await?; -// let mut worktrees = Vec::new(); -// for worktree in collaborator.worktrees { -// let mut participants = Vec::new(); -// for participant_id in worktree.participants { -// participants.push(user_store.fetch_user(participant_id).await?); -// } -// worktrees.push(WorktreeMetadata { -// root_name: worktree.root_name, -// is_shared: worktree.is_shared, -// participants, -// }); -// } -// Ok(Self { user, worktrees }) -// } -// } diff --git a/zed/src/rpc.rs b/zed/src/rpc.rs index 6a81bf3c4a28d..7562a8cc1c0ab 100644 --- a/zed/src/rpc.rs +++ b/zed/src/rpc.rs @@ -230,7 +230,11 @@ impl Client { } } - pub fn subscribe(self: &Arc, cx: ModelContext, mut handler: F) -> Subscription + pub fn subscribe( + self: &Arc, + cx: &mut ModelContext, + mut handler: F, + ) -> Subscription where T: EnvelopedMessage, M: Entity, diff --git a/zed/src/test.rs b/zed/src/test.rs index 9e02bb74db332..eb6bf20f45623 100644 --- a/zed/src/test.rs +++ b/zed/src/test.rs @@ -4,7 +4,6 @@ use crate::{ fs::RealFs, http::{HttpClient, Request, Response, ServerResponse}, language::LanguageRegistry, - presence::Presence, rpc::{self, Client, Credentials, EstablishConnectionError}, settings::{self, ThemeRegistry}, time::ReplicaId, diff --git a/zed/src/user.rs b/zed/src/user.rs index 8a050d516420a..637c50e150d6e 100644 --- a/zed/src/user.rs +++ b/zed/src/user.rs @@ -1,14 +1,17 @@ use crate::{ http::{HttpClient, Method, Request, Url}, - rpc::{Client, Status}, + rpc::{self, Client, Status}, util::TryFutureExt, }; use anyhow::{anyhow, Context, Result}; use futures::future; -use gpui::{Entity, ImageData, ModelContext, Task}; +use gpui::{AsyncAppContext, Entity, ImageData, ModelContext, ModelHandle, Task}; use postage::{prelude::Stream, sink::Sink, watch}; -use 
std::{collections::HashMap, sync::Arc}; -use zrpc::proto; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; +use zrpc::{proto, TypedEnvelope}; #[derive(Debug)] pub struct User { @@ -17,11 +20,26 @@ pub struct User { pub avatar: Option>, } +#[derive(Debug)] +struct Collaborator { + pub user: Arc, + pub worktrees: Vec, +} + +#[derive(Debug)] +struct WorktreeMetadata { + pub root_name: String, + pub is_shared: bool, + pub participants: Vec>, +} + pub struct UserStore { users: HashMap>, current_user: watch::Receiver>>, + collaborators: Vec, rpc: Arc, http: Arc, + _maintain_collaborators: rpc::Subscription, _maintain_current_user: Task<()>, } @@ -37,8 +55,10 @@ impl UserStore { Self { users: Default::default(), current_user: current_user_rx, + collaborators: Default::default(), rpc: rpc.clone(), http, + _maintain_collaborators: rpc.subscribe(cx, Self::update_collaborators), _maintain_current_user: cx.spawn_weak(|this, mut cx| async move { let mut status = rpc.status(); while let Some(status) = status.recv().await { @@ -62,6 +82,45 @@ impl UserStore { } } + fn update_collaborators( + &mut self, + message: TypedEnvelope, + _: Arc, + cx: &mut ModelContext, + ) -> Result<()> { + let mut user_ids = HashSet::new(); + for collaborator in &message.payload.collaborators { + user_ids.insert(collaborator.user_id); + user_ids.extend( + collaborator + .worktrees + .iter() + .flat_map(|w| &w.participants) + .copied(), + ); + } + + let load_users = self.load_users(user_ids.into_iter().collect(), cx); + cx.spawn(|this, mut cx| async move { + load_users.await?; + + let mut collaborators = Vec::new(); + for collaborator in message.payload.collaborators { + collaborators.push(Collaborator::from_proto(collaborator, &this, &mut cx).await?); + } + + this.update(&mut cx, |this, cx| { + this.collaborators = collaborators; + cx.notify(); + }); + + Result::<_, anyhow::Error>::Ok(()) + }) + .detach(); + + Ok(()) + } + pub fn load_users( &mut self, mut user_ids: Vec, @@ -134,6 
+193,39 @@ impl User { } } +impl Collaborator { + async fn from_proto( + collaborator: proto::Collaborator, + user_store: &ModelHandle, + cx: &mut AsyncAppContext, + ) -> Result { + let user = user_store + .update(cx, |user_store, cx| { + user_store.fetch_user(collaborator.user_id, cx) + }) + .await?; + let mut worktrees = Vec::new(); + for worktree in collaborator.worktrees { + let mut participants = Vec::new(); + for participant_id in worktree.participants { + participants.push( + user_store + .update(cx, |user_store, cx| { + user_store.fetch_user(participant_id, cx) + }) + .await?, + ); + } + worktrees.push(WorktreeMetadata { + root_name: worktree.root_name, + is_shared: worktree.is_shared, + participants, + }); + } + Ok(Self { user, worktrees }) + } +} + async fn fetch_avatar(http: &dyn HttpClient, url: &str) -> Result> { let url = Url::parse(url).with_context(|| format!("failed to parse avatar url {:?}", url))?; let mut request = Request::new(Method::Get, url); From 67946b722483f3e84cfe36d582ba1c899c0e96b5 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 20 Sep 2021 16:40:31 +0200 Subject: [PATCH 12/43] Add an integration test to verify collaborators are kept up to date --- server/src/db.rs | 101 ++++-------------------------------- server/src/rpc.rs | 127 +++++++++++++++++++++++++++++++++++++++++++--- zed/src/user.rs | 44 +++++++++++----- 3 files changed, 161 insertions(+), 111 deletions(-) diff --git a/server/src/db.rs b/server/src/db.rs index a826220b11b36..15290d587c43a 100644 --- a/server/src/db.rs +++ b/server/src/db.rs @@ -108,53 +108,16 @@ impl Db { }) } - pub async fn get_users_by_ids( - &self, - requester_id: UserId, - ids: impl Iterator, - ) -> Result> { - let mut include_requester = false; - let ids = ids - .map(|id| { - if id == requester_id { - include_requester = true; - } - id.0 - }) - .collect::>(); - + pub async fn get_users_by_ids(&self, ids: impl Iterator) -> Result> { + let ids = ids.map(|id| id.0).collect::>(); 
test_support!(self, { - // Only return users that are in a common channel with the requesting user. - // Also allow the requesting user to return their own data, even if they aren't - // in any channels. let query = " - SELECT - users.* - FROM - users, channel_memberships - WHERE - users.id = ANY ($1) AND - channel_memberships.user_id = users.id AND - channel_memberships.channel_id IN ( - SELECT channel_id - FROM channel_memberships - WHERE channel_memberships.user_id = $2 - ) - UNION - SELECT - users.* - FROM - users - WHERE - $3 AND users.id = $2 + SELECT users.* + FROM users + WHERE users.id = ANY ($1) "; - sqlx::query_as(query) - .bind(&ids) - .bind(requester_id) - .bind(include_requester) - .fetch_all(&self.pool) - .await + sqlx::query_as(query).bind(&ids).fetch_all(&self.pool).await }) } @@ -582,45 +545,11 @@ pub mod tests { let friend1 = db.create_user("friend-1", false).await.unwrap(); let friend2 = db.create_user("friend-2", false).await.unwrap(); let friend3 = db.create_user("friend-3", false).await.unwrap(); - let stranger = db.create_user("stranger", false).await.unwrap(); - // A user can read their own info, even if they aren't in any channels. assert_eq!( - db.get_users_by_ids( - user, - [user, friend1, friend2, friend3, stranger].iter().copied() - ) - .await - .unwrap(), - vec![User { - id: user, - github_login: "user".to_string(), - admin: false, - },], - ); - - // A user can read the info of any other user who is in a shared channel - // with them. 
- let org = db.create_org("test org", "test-org").await.unwrap(); - let chan1 = db.create_org_channel(org, "channel-1").await.unwrap(); - let chan2 = db.create_org_channel(org, "channel-2").await.unwrap(); - let chan3 = db.create_org_channel(org, "channel-3").await.unwrap(); - - db.add_channel_member(chan1, user, false).await.unwrap(); - db.add_channel_member(chan2, user, false).await.unwrap(); - db.add_channel_member(chan1, friend1, false).await.unwrap(); - db.add_channel_member(chan1, friend2, false).await.unwrap(); - db.add_channel_member(chan2, friend2, false).await.unwrap(); - db.add_channel_member(chan2, friend3, false).await.unwrap(); - db.add_channel_member(chan3, stranger, false).await.unwrap(); - - assert_eq!( - db.get_users_by_ids( - user, - [user, friend1, friend2, friend3, stranger].iter().copied() - ) - .await - .unwrap(), + db.get_users_by_ids([user, friend1, friend2, friend3].iter().copied()) + .await + .unwrap(), vec![ User { id: user, @@ -644,18 +573,6 @@ pub mod tests { } ] ); - - // The user's own info is only returned if they request it. 
- assert_eq!( - db.get_users_by_ids(user, [friend1].iter().copied()) - .await - .unwrap(), - vec![User { - id: friend1, - github_login: "friend-1".to_string(), - admin: false, - },] - ) } #[gpui::test] diff --git a/server/src/rpc.rs b/server/src/rpc.rs index b0d50d7272934..e6a8224c1fe61 100644 --- a/server/src/rpc.rs +++ b/server/src/rpc.rs @@ -149,6 +149,9 @@ impl Server { let (connection_id, handle_io, mut incoming_rx) = this.peer.add_connection(connection).await; this.add_connection(connection_id, user_id).await; + if let Err(err) = this.update_collaborators_for_users(&[user_id]).await { + log::error!("error updating collaborators for {:?}: {}", user_id, err); + } let handle_io = handle_io.fuse(); futures::pin_mut!(handle_io); @@ -668,17 +671,12 @@ impl Server { self: Arc, request: TypedEnvelope, ) -> tide::Result<()> { - let user_id = self - .state - .read() - .await - .user_id_for_connection(request.sender_id)?; let receipt = request.receipt(); let user_ids = request.payload.user_ids.into_iter().map(UserId::from_proto); let users = self .app_state .db - .get_users_by_ids(user_id, user_ids) + .get_users_by_ids(user_ids) .await? .into_iter() .map(|user| proto::User { @@ -2150,6 +2148,123 @@ mod tests { .await; } + #[gpui::test] + async fn test_collaborators( + mut cx_a: TestAppContext, + mut cx_b: TestAppContext, + mut cx_c: TestAppContext, + ) { + cx_a.foreground().forbid_parking(); + let lang_registry = Arc::new(LanguageRegistry::new()); + + // Connect to a server as 3 clients. + let mut server = TestServer::start().await; + let (client_a, user_store_a) = server.create_client(&mut cx_a, "user_a").await; + let (client_b, user_store_b) = server.create_client(&mut cx_b, "user_b").await; + let (_client_c, user_store_c) = server.create_client(&mut cx_c, "user_c").await; + + let fs = Arc::new(FakeFs::new()); + + // Share a worktree as client A. 
+ fs.insert_tree( + "/a", + json!({ + ".zed.toml": r#"collaborators = ["user_b", "user_c"]"#, + }), + ) + .await; + + let worktree_a = Worktree::open_local( + client_a.clone(), + "/a".as_ref(), + fs.clone(), + lang_registry.clone(), + &mut cx_a.to_async(), + ) + .await + .unwrap(); + + user_store_a + .condition(&cx_a, |user_store, _| { + collaborators(user_store) == vec![("user_a", vec![("a", vec![])])] + }) + .await; + user_store_b + .condition(&cx_b, |user_store, _| { + collaborators(user_store) == vec![("user_a", vec![("a", vec![])])] + }) + .await; + user_store_c + .condition(&cx_c, |user_store, _| { + collaborators(user_store) == vec![("user_a", vec![("a", vec![])])] + }) + .await; + + let worktree_id = worktree_a + .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx)) + .await + .unwrap(); + + let _worktree_b = Worktree::open_remote( + client_b.clone(), + worktree_id, + lang_registry.clone(), + &mut cx_b.to_async(), + ) + .await + .unwrap(); + + user_store_a + .condition(&cx_a, |user_store, _| { + collaborators(user_store) == vec![("user_a", vec![("a", vec!["user_b"])])] + }) + .await; + user_store_b + .condition(&cx_b, |user_store, _| { + collaborators(user_store) == vec![("user_a", vec![("a", vec!["user_b"])])] + }) + .await; + user_store_c + .condition(&cx_c, |user_store, _| { + collaborators(user_store) == vec![("user_a", vec![("a", vec!["user_b"])])] + }) + .await; + + cx_a.update(move |_| drop(worktree_a)); + user_store_a + .condition(&cx_a, |user_store, _| collaborators(user_store) == vec![]) + .await; + user_store_b + .condition(&cx_b, |user_store, _| collaborators(user_store) == vec![]) + .await; + user_store_c + .condition(&cx_c, |user_store, _| collaborators(user_store) == vec![]) + .await; + + fn collaborators(user_store: &UserStore) -> Vec<(&str, Vec<(&str, Vec<&str>)>)> { + user_store + .collaborators() + .iter() + .map(|collaborator| { + let worktrees = collaborator + .worktrees + .iter() + .map(|w| { + ( + w.root_name.as_str(), 
+ w.participants + .iter() + .map(|p| p.github_login.as_str()) + .collect(), + ) + }) + .collect(); + (collaborator.user.github_login.as_str(), worktrees) + }) + .collect() + } + } + struct TestServer { peer: Arc, app_state: Arc, diff --git a/zed/src/user.rs b/zed/src/user.rs index 637c50e150d6e..1ed7a1662a4de 100644 --- a/zed/src/user.rs +++ b/zed/src/user.rs @@ -1,6 +1,6 @@ use crate::{ http::{HttpClient, Method, Request, Url}, - rpc::{self, Client, Status}, + rpc::{Client, Status}, util::TryFutureExt, }; use anyhow::{anyhow, Context, Result}; @@ -21,13 +21,13 @@ pub struct User { } #[derive(Debug)] -struct Collaborator { +pub struct Collaborator { pub user: Arc, pub worktrees: Vec, } #[derive(Debug)] -struct WorktreeMetadata { +pub struct WorktreeMetadata { pub root_name: String, pub is_shared: bool, pub participants: Vec>, @@ -39,7 +39,7 @@ pub struct UserStore { collaborators: Vec, rpc: Arc, http: Arc, - _maintain_collaborators: rpc::Subscription, + _maintain_collaborators: Task<()>, _maintain_current_user: Task<()>, } @@ -52,13 +52,31 @@ impl Entity for UserStore { impl UserStore { pub fn new(rpc: Arc, http: Arc, cx: &mut ModelContext) -> Self { let (mut current_user_tx, current_user_rx) = watch::channel(); + let (mut update_collaborators_tx, mut update_collaborators_rx) = + watch::channel::>(); + let update_collaborators_subscription = rpc.subscribe( + cx, + move |_: &mut Self, msg: TypedEnvelope, _, _| { + let _ = update_collaborators_tx.blocking_send(Some(msg.payload)); + Ok(()) + }, + ); Self { users: Default::default(), current_user: current_user_rx, collaborators: Default::default(), rpc: rpc.clone(), http, - _maintain_collaborators: rpc.subscribe(cx, Self::update_collaborators), + _maintain_collaborators: cx.spawn_weak(|this, mut cx| async move { + let _subscription = update_collaborators_subscription; + while let Some(message) = update_collaborators_rx.recv().await { + if let Some((message, this)) = message.zip(this.upgrade(&cx)) { + this.update(&mut 
cx, |this, cx| this.update_collaborators(message, cx)) + .log_err() + .await; + } + } + }), _maintain_current_user: cx.spawn_weak(|this, mut cx| async move { let mut status = rpc.status(); while let Some(status) = status.recv().await { @@ -84,12 +102,11 @@ impl UserStore { fn update_collaborators( &mut self, - message: TypedEnvelope, - _: Arc, + message: proto::UpdateCollaborators, cx: &mut ModelContext, - ) -> Result<()> { + ) -> Task> { let mut user_ids = HashSet::new(); - for collaborator in &message.payload.collaborators { + for collaborator in &message.collaborators { user_ids.insert(collaborator.user_id); user_ids.extend( collaborator @@ -105,7 +122,7 @@ impl UserStore { load_users.await?; let mut collaborators = Vec::new(); - for collaborator in message.payload.collaborators { + for collaborator in message.collaborators { collaborators.push(Collaborator::from_proto(collaborator, &this, &mut cx).await?); } @@ -114,11 +131,12 @@ impl UserStore { cx.notify(); }); - Result::<_, anyhow::Error>::Ok(()) + Ok(()) }) - .detach(); + } - Ok(()) + pub fn collaborators(&self) -> &[Collaborator] { + &self.collaborators } pub fn load_users( From 23652f2ba69e2cc01f90f62bd9a749b51c6b519a Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 20 Sep 2021 20:04:48 +0200 Subject: [PATCH 13/43] Start on `PeoplePanel::render` Co-Authored-By: Max Brunsfeld Co-Authored-By: Nathan Sobo --- zed/src/people_panel.rs | 96 ++++++++++++++++++++++++++++++++++++++--- zed/src/theme.rs | 11 +++++ zed/src/workspace.rs | 6 ++- 3 files changed, 104 insertions(+), 9 deletions(-) diff --git a/zed/src/people_panel.rs b/zed/src/people_panel.rs index 246a5e04916cb..38673edefd42c 100644 --- a/zed/src/people_panel.rs +++ b/zed/src/people_panel.rs @@ -1,17 +1,99 @@ use gpui::{ - elements::Empty, Element, ElementBox, Entity, ModelHandle, RenderContext, View, ViewContext, + elements::*, Element, ElementBox, Entity, ModelHandle, RenderContext, View, ViewContext, }; +use postage::watch; -use 
crate::user::UserStore; +use crate::{ + theme::Theme, + user::{Collaborator, UserStore}, + Settings, +}; pub struct PeoplePanel { + collaborators: ListState, user_store: ModelHandle, } impl PeoplePanel { - pub fn new(user_store: ModelHandle, cx: &mut ViewContext) -> Self { - cx.observe(&user_store, |_, _, cx| cx.notify()); - Self { user_store } + pub fn new( + user_store: ModelHandle, + settings: watch::Receiver, + cx: &mut ViewContext, + ) -> Self { + cx.observe(&user_store, Self::update_collaborators); + Self { + collaborators: ListState::new( + user_store.read(cx).collaborators().len(), + Orientation::Top, + 1000., + { + let user_store = user_store.clone(); + move |ix, cx| { + let user_store = user_store.read(cx); + let settings = settings.borrow(); + Self::render_collaborator(&user_store.collaborators()[ix], &settings.theme) + } + }, + ), + user_store, + } + } + + fn update_collaborators(&mut self, _: ModelHandle, cx: &mut ViewContext) { + self.collaborators + .reset(self.user_store.read(cx).collaborators().len()); + cx.notify(); + } + + fn render_collaborator(collaborator: &Collaborator, theme: &Theme) -> ElementBox { + Flex::column() + .with_child( + Flex::row() + .with_children(collaborator.user.avatar.clone().map(|avatar| { + ConstrainedBox::new( + Image::new(avatar) + .with_style(theme.people_panel.worktree_host_avatar) + .boxed(), + ) + .with_width(20.) 
+ .boxed() + })) + .with_child( + Label::new( + collaborator.user.github_login.clone(), + theme.people_panel.collaborator_username.clone(), + ) + .boxed(), + ) + .boxed(), + ) + .with_children(collaborator.worktrees.iter().map(|worktree| { + Flex::row() + .with_child( + Container::new( + Label::new( + worktree.root_name.clone(), + theme.people_panel.worktree_name.text.clone(), + ) + .boxed(), + ) + .with_style(theme.people_panel.worktree_name.container) + .boxed(), + ) + .with_children(worktree.participants.iter().filter_map(|participant| { + participant.avatar.clone().map(|avatar| { + ConstrainedBox::new( + Image::new(avatar) + .with_style(theme.people_panel.worktree_guest_avatar) + .boxed(), + ) + .with_width(16.) + .boxed() + }) + })) + .boxed() + })) + .boxed() } } @@ -26,7 +108,7 @@ impl View for PeoplePanel { "PeoplePanel" } - fn render(&mut self, _: &mut RenderContext) -> ElementBox { - Empty::new().boxed() + fn render(&mut self, cx: &mut RenderContext) -> ElementBox { + List::new(self.collaborators.clone()).boxed() } } diff --git a/zed/src/theme.rs b/zed/src/theme.rs index a96945fecc101..38c93db14b671 100644 --- a/zed/src/theme.rs +++ b/zed/src/theme.rs @@ -23,6 +23,7 @@ pub struct Theme { pub name: String, pub workspace: Workspace, pub chat_panel: ChatPanel, + pub people_panel: PeoplePanel, pub selector: Selector, pub editor: EditorStyle, pub syntax: SyntaxTheme, @@ -103,6 +104,16 @@ pub struct ChatPanel { pub hovered_sign_in_prompt: TextStyle, } +#[derive(Deserialize)] +pub struct PeoplePanel { + #[serde(flatten)] + pub container: ContainerStyle, + pub collaborator_username: TextStyle, + pub worktree_name: ContainedText, + pub worktree_host_avatar: ImageStyle, + pub worktree_guest_avatar: ImageStyle, +} + #[derive(Deserialize)] pub struct ChatMessage { #[serde(flatten)] diff --git a/zed/src/workspace.rs b/zed/src/workspace.rs index 3182fc3aab80b..baf14936ccf72 100644 --- a/zed/src/workspace.rs +++ b/zed/src/workspace.rs @@ -381,8 +381,10 @@ impl Workspace 
{ ); right_sidebar.add_item( "icons/user-16.svg", - cx.add_view(|cx| PeoplePanel::new(app_state.user_store.clone(), cx)) - .into(), + cx.add_view(|cx| { + PeoplePanel::new(app_state.user_store.clone(), app_state.settings.clone(), cx) + }) + .into(), ); let mut current_user = app_state.user_store.read(cx).watch_current_user().clone(); From 4ca58144700dd8c4cdca8ec8c4ddfd2c996b2a5a Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 20 Sep 2021 13:38:59 -0700 Subject: [PATCH 14/43] Add missing people panel properties to base theme Co-Authored-By: Nathan Sobo --- zed/assets/themes/_base.toml | 8 ++++++++ zed/src/people_panel.rs | 2 +- zed/src/theme.rs | 2 +- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/zed/assets/themes/_base.toml b/zed/assets/themes/_base.toml index 1a2999379c4e4..3bb2bfc006157 100644 --- a/zed/assets/themes/_base.toml +++ b/zed/assets/themes/_base.toml @@ -120,6 +120,14 @@ underline = true extends = "$chat_panel.sign_in_prompt" color = "$text.1.color" +[people_panel] +host_username = "$text.0" +worktree_host_avatar = { corner_radius = 10 } +worktree_guest_avatar = { corner_radius = 8 } + +[people_panel.worktree_name] +extends = "$text.0" + [selector] background = "$surface.0" padding = 8 diff --git a/zed/src/people_panel.rs b/zed/src/people_panel.rs index 38673edefd42c..7d935c714e05f 100644 --- a/zed/src/people_panel.rs +++ b/zed/src/people_panel.rs @@ -61,7 +61,7 @@ impl PeoplePanel { .with_child( Label::new( collaborator.user.github_login.clone(), - theme.people_panel.collaborator_username.clone(), + theme.people_panel.host_username.clone(), ) .boxed(), ) diff --git a/zed/src/theme.rs b/zed/src/theme.rs index 38c93db14b671..46c66df82c21d 100644 --- a/zed/src/theme.rs +++ b/zed/src/theme.rs @@ -108,7 +108,7 @@ pub struct ChatPanel { pub struct PeoplePanel { #[serde(flatten)] pub container: ContainerStyle, - pub collaborator_username: TextStyle, + pub host_username: TextStyle, pub worktree_name: ContainedText, pub 
worktree_host_avatar: ImageStyle, pub worktree_guest_avatar: ImageStyle, From a068019d94e439a783e3d06ad65103cabd19512a Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 20 Sep 2021 15:44:28 -0700 Subject: [PATCH 15/43] Add `ZED_IMPERSONATE` env var, for testing Co-Authored-By: Nathan Sobo --- server/src/auth.rs | 28 +++++++++++++++++++++++----- server/src/db.rs | 9 ++++++--- zed/src/rpc.rs | 27 ++++++++++++++++++++------- 3 files changed, 49 insertions(+), 15 deletions(-) diff --git a/server/src/auth.rs b/server/src/auth.rs index 1f6ec5f1db176..e60802285ec60 100644 --- a/server/src/auth.rs +++ b/server/src/auth.rs @@ -18,7 +18,7 @@ use scrypt::{ use serde::{Deserialize, Serialize}; use std::{borrow::Cow, convert::TryFrom, sync::Arc}; use surf::{StatusCode, Url}; -use tide::Server; +use tide::{log, Server}; use zrpc::auth as zed_auth; static CURRENT_GITHUB_USER: &'static str = "current_github_user"; @@ -121,6 +121,7 @@ pub fn add_routes(app: &mut Server>) { struct NativeAppSignInParams { native_app_port: String, native_app_public_key: String, + impersonate: Option, } async fn get_sign_in(mut request: Request) -> tide::Result { @@ -142,11 +143,15 @@ async fn get_sign_in(mut request: Request) -> tide::Result { let app_sign_in_params: Option = request.query().ok(); if let Some(query) = app_sign_in_params { - redirect_url - .query_pairs_mut() + let mut redirect_query = redirect_url.query_pairs_mut(); + redirect_query .clear() .append_pair("native_app_port", &query.native_app_port) .append_pair("native_app_public_key", &query.native_app_public_key); + + if let Some(impersonate) = &query.impersonate { + redirect_query.append_pair("impersonate", impersonate); + } } let (auth_url, csrf_token) = request @@ -222,7 +227,20 @@ async fn get_auth_callback(mut request: Request) -> tide::Result { // When signing in from the native app, generate a new access token for the current user. 
Return // a redirect so that the user's browser sends this access token to the locally-running app. if let Some((user, app_sign_in_params)) = user.zip(query.native_app_sign_in_params) { - let access_token = create_access_token(request.db(), user.id).await?; + let mut user_id = user.id; + if let Some(impersonated_login) = app_sign_in_params.impersonate { + log::info!("attempting to impersonate user @{}", impersonated_login); + if let Some(user) = request.db().get_users_by_ids([user_id]).await?.first() { + if user.admin { + user_id = request.db().create_user(&impersonated_login, false).await?; + log::info!("impersonating user {}", user_id.0); + } else { + log::info!("refusing to impersonate user"); + } + } + } + + let access_token = create_access_token(request.db(), user_id).await?; let native_app_public_key = zed_auth::PublicKey::try_from(app_sign_in_params.native_app_public_key.clone()) .context("failed to parse app public key")?; @@ -232,7 +250,7 @@ async fn get_auth_callback(mut request: Request) -> tide::Result { return Ok(tide::Redirect::new(&format!( "http://127.0.0.1:{}?user_id={}&access_token={}", - app_sign_in_params.native_app_port, user.id.0, encrypted_access_token, + app_sign_in_params.native_app_port, user_id.0, encrypted_access_token, )) .into()); } diff --git a/server/src/db.rs b/server/src/db.rs index 15290d587c43a..002b82741c6de 100644 --- a/server/src/db.rs +++ b/server/src/db.rs @@ -108,8 +108,11 @@ impl Db { }) } - pub async fn get_users_by_ids(&self, ids: impl Iterator) -> Result> { - let ids = ids.map(|id| id.0).collect::>(); + pub async fn get_users_by_ids( + &self, + ids: impl IntoIterator, + ) -> Result> { + let ids = ids.into_iter().map(|id| id.0).collect::>(); test_support!(self, { let query = " SELECT users.* @@ -547,7 +550,7 @@ pub mod tests { let friend3 = db.create_user("friend-3", false).await.unwrap(); assert_eq!( - db.get_users_by_ids([user, friend1, friend2, friend3].iter().copied()) + db.get_users_by_ids([user, friend1, friend2, 
friend3]) .await .unwrap(), vec![ diff --git a/zed/src/rpc.rs b/zed/src/rpc.rs index 7562a8cc1c0ab..f226091baff33 100644 --- a/zed/src/rpc.rs +++ b/zed/src/rpc.rs @@ -14,6 +14,7 @@ use std::{ any::TypeId, collections::HashMap, convert::TryFrom, + fmt::Write as _, future::Future, sync::{Arc, Weak}, time::{Duration, Instant}, @@ -29,6 +30,7 @@ use zrpc::{ lazy_static! { static ref ZED_SERVER_URL: String = std::env::var("ZED_SERVER_URL").unwrap_or("https://zed.dev:443".to_string()); + static ref IMPERSONATE_LOGIN: Option = std::env::var("ZED_IMPERSONATE").ok(); } pub struct Client { @@ -350,12 +352,12 @@ impl Client { self.set_status(Status::Reauthenticating, cx) } - let mut read_from_keychain = false; + let mut used_keychain = false; let credentials = self.state.read().credentials.clone(); let credentials = if let Some(credentials) = credentials { credentials } else if let Some(credentials) = read_credentials_from_keychain(cx) { - read_from_keychain = true; + used_keychain = true; credentials } else { let credentials = match self.authenticate(&cx).await { @@ -378,7 +380,7 @@ impl Client { Ok(conn) => { log::info!("connected to rpc address {}", *ZED_SERVER_URL); self.state.write().credentials = Some(credentials.clone()); - if !read_from_keychain { + if !used_keychain && IMPERSONATE_LOGIN.is_none() { write_credentials_to_keychain(&credentials, cx).log_err(); } self.set_connection(conn, cx).await; @@ -387,8 +389,8 @@ impl Client { Err(err) => { if matches!(err, EstablishConnectionError::Unauthorized) { self.state.write().credentials.take(); - cx.platform().delete_credentials(&ZED_SERVER_URL).log_err(); - if read_from_keychain { + if used_keychain { + cx.platform().delete_credentials(&ZED_SERVER_URL).log_err(); self.set_status(Status::SignedOut, cx); self.authenticate_and_connect(cx).await } else { @@ -524,10 +526,17 @@ impl Client { // Open the Zed sign-in page in the user's browser, with query parameters that indicate // that the user is signing in from a Zed app 
running on the same device. - platform.open_url(&format!( + let mut url = format!( "{}/sign_in?native_app_port={}&native_app_public_key={}", *ZED_SERVER_URL, port, public_key_string - )); + ); + + if let Some(impersonate_login) = IMPERSONATE_LOGIN.as_ref() { + log::info!("impersonating user @{}", impersonate_login); + write!(&mut url, "&impersonate={}", impersonate_login).unwrap(); + } + + platform.open_url(&url); // Receive the HTTP request from the user's browser. Retrieve the user id and encrypted // access token from the query params. @@ -611,6 +620,10 @@ impl Client { } fn read_credentials_from_keychain(cx: &AsyncAppContext) -> Option { + if IMPERSONATE_LOGIN.is_some() { + return None; + } + let (user_id, access_token) = cx .platform() .read_credentials(&ZED_SERVER_URL) From 8de9c362c9889395415e9b719322053aeb131293 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 20 Sep 2021 15:44:42 -0700 Subject: [PATCH 16/43] Add .zed.toml --- .zed.toml | 1 + 1 file changed, 1 insertion(+) create mode 100644 .zed.toml diff --git a/.zed.toml b/.zed.toml new file mode 100644 index 0000000000000..6e8c8fe4282e9 --- /dev/null +++ b/.zed.toml @@ -0,0 +1 @@ +collaborators = ["nathansobo", "as-cii", "maxbrunsfeld", "iamnbutler"] From 8f578e7521264229ae0b6a8620a4b4013bc633ed Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 20 Sep 2021 15:45:33 -0700 Subject: [PATCH 17/43] Maintain server state consistency when removing a connection Co-Authored-By: Nathan Sobo --- server/src/rpc.rs | 107 +++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 101 insertions(+), 6 deletions(-) diff --git a/server/src/rpc.rs b/server/src/rpc.rs index e6a8224c1fe61..961bbffae2a90 100644 --- a/server/src/rpc.rs +++ b/server/src/rpc.rs @@ -3,6 +3,7 @@ use super::{ db::{ChannelId, MessageId, UserId}, AppState, }; +use crate::errors::TideResultExt; use anyhow::anyhow; use async_std::{sync::RwLock, task}; use async_tungstenite::{tungstenite::protocol::Role, WebSocketStream}; @@ -49,7 
+50,7 @@ pub struct Server { struct ServerState { connections: HashMap, connections_by_user_id: HashMap>, - pub worktrees: HashMap, + worktrees: HashMap, visible_worktrees_by_user_id: HashMap>, channels: HashMap, next_worktree_id: u64, @@ -707,15 +708,19 @@ impl Server { { let worktree = &state.worktrees[worktree_id]; - let mut participants = HashSet::new(); + let mut guests = HashSet::new(); if let Ok(share) = worktree.share() { for guest_connection_id in share.guest_connection_ids.keys() { - let user_id = state.user_id_for_connection(*guest_connection_id)?; - participants.insert(user_id.to_proto()); + let user_id = state + .user_id_for_connection(*guest_connection_id) + .context("stale worktree guest connection")?; + guests.insert(user_id.to_proto()); } } - let host_user_id = state.user_id_for_connection(worktree.host_connection_id)?; + let host_user_id = state + .user_id_for_connection(worktree.host_connection_id) + .context("stale worktree host connection")?; let host = collaborators .entry(host_user_id) @@ -726,7 +731,7 @@ impl Server { host.worktrees.push(proto::WorktreeMetadata { root_name: worktree.root_name.clone(), is_shared: worktree.share().is_ok(), - participants: participants.into_iter().collect(), + participants: guests.into_iter().collect(), }); } @@ -1137,7 +1142,14 @@ impl ServerState { .insert(worktree_id); } self.next_worktree_id += 1; + if let Some(connection) = self.connections.get_mut(&worktree.host_connection_id) { + connection.worktrees.insert(worktree_id); + } self.worktrees.insert(worktree_id, worktree); + + #[cfg(test)] + self.check_invariants(); + worktree_id } @@ -1161,6 +1173,89 @@ impl ServerState { visible_worktrees.remove(&worktree_id); } } + + #[cfg(test)] + self.check_invariants(); + } + + #[cfg(test)] + fn check_invariants(&self) { + for (connection_id, connection) in &self.connections { + for worktree_id in &connection.worktrees { + let worktree = &self.worktrees.get(&worktree_id).unwrap(); + if worktree.host_connection_id != 
*connection_id { + assert!(worktree + .share() + .unwrap() + .guest_connection_ids + .contains_key(connection_id)); + } + } + for channel_id in &connection.channels { + let channel = self.channels.get(channel_id).unwrap(); + assert!(channel.connection_ids.contains(connection_id)); + } + assert!(self + .connections_by_user_id + .get(&connection.user_id) + .unwrap() + .contains(connection_id)); + } + + for (user_id, connection_ids) in &self.connections_by_user_id { + for connection_id in connection_ids { + assert_eq!( + self.connections.get(connection_id).unwrap().user_id, + *user_id + ); + } + } + + for (worktree_id, worktree) in &self.worktrees { + let host_connection = self.connections.get(&worktree.host_connection_id).unwrap(); + assert!(host_connection.worktrees.contains(worktree_id)); + + for collaborator_id in &worktree.collaborator_user_ids { + let visible_worktree_ids = self + .visible_worktrees_by_user_id + .get(collaborator_id) + .unwrap(); + assert!(visible_worktree_ids.contains(worktree_id)); + } + + if let Some(share) = &worktree.share { + for guest_connection_id in share.guest_connection_ids.keys() { + let guest_connection = self.connections.get(guest_connection_id).unwrap(); + assert!(guest_connection.worktrees.contains(worktree_id)); + } + assert_eq!( + share.active_replica_ids.len(), + share.guest_connection_ids.len(), + ); + assert_eq!( + share.active_replica_ids, + share + .guest_connection_ids + .values() + .copied() + .collect::>(), + ); + } + } + + for (user_id, visible_worktree_ids) in &self.visible_worktrees_by_user_id { + for worktree_id in visible_worktree_ids { + let worktree = self.worktrees.get(worktree_id).unwrap(); + assert!(worktree.collaborator_user_ids.contains(user_id)); + } + } + + for (channel_id, channel) in &self.channels { + for connection_id in &channel.connection_ids { + let connection = self.connections.get(connection_id).unwrap(); + assert!(connection.channels.contains(channel_id)); + } + } } } From 
8b1a2c8cd2b51f999547f2ddacacaa33f12e24c3 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 20 Sep 2021 15:45:41 -0700 Subject: [PATCH 18/43] Fix warnings in people_panel --- zed/src/people_panel.rs | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/zed/src/people_panel.rs b/zed/src/people_panel.rs index 7d935c714e05f..fc459825de043 100644 --- a/zed/src/people_panel.rs +++ b/zed/src/people_panel.rs @@ -1,17 +1,18 @@ -use gpui::{ - elements::*, Element, ElementBox, Entity, ModelHandle, RenderContext, View, ViewContext, -}; -use postage::watch; - use crate::{ theme::Theme, user::{Collaborator, UserStore}, Settings, }; +use gpui::{ + elements::*, Element, ElementBox, Entity, ModelHandle, RenderContext, Subscription, View, + ViewContext, +}; +use postage::watch; pub struct PeoplePanel { collaborators: ListState, user_store: ModelHandle, + _maintain_collaborators: Subscription, } impl PeoplePanel { @@ -20,7 +21,6 @@ impl PeoplePanel { settings: watch::Receiver, cx: &mut ViewContext, ) -> Self { - cx.observe(&user_store, Self::update_collaborators); Self { collaborators: ListState::new( user_store.read(cx).collaborators().len(), @@ -35,6 +35,7 @@ impl PeoplePanel { } }, ), + _maintain_collaborators: cx.observe(&user_store, Self::update_collaborators), user_store, } } @@ -108,7 +109,7 @@ impl View for PeoplePanel { "PeoplePanel" } - fn render(&mut self, cx: &mut RenderContext) -> ElementBox { + fn render(&mut self, _: &mut RenderContext) -> ElementBox { List::new(self.collaborators.clone()).boxed() } } From 5dfd4be174bd8e79523bb33df92217dbb9cc8ef6 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 20 Sep 2021 18:05:46 -0700 Subject: [PATCH 19/43] WIP --- server/src/rpc.rs | 712 +++++++++------------------------------- server/src/rpc/store.rs | 574 ++++++++++++++++++++++++++++++++ zed/src/worktree.rs | 35 +- zrpc/proto/zed.proto | 5 + zrpc/src/proto.rs | 1 + 5 files changed, 753 insertions(+), 574 deletions(-) create mode 100644 
server/src/rpc/store.rs diff --git a/server/src/rpc.rs b/server/src/rpc.rs index 961bbffae2a90..009fc9bd1ed8c 100644 --- a/server/src/rpc.rs +++ b/server/src/rpc.rs @@ -1,3 +1,5 @@ +mod store; + use super::{ auth, db::{ChannelId, MessageId, UserId}, @@ -8,16 +10,17 @@ use anyhow::anyhow; use async_std::{sync::RwLock, task}; use async_tungstenite::{tungstenite::protocol::Role, WebSocketStream}; use futures::{future::BoxFuture, FutureExt}; -use postage::{mpsc, prelude::Sink as _, prelude::Stream as _}; +use postage::{broadcast, mpsc, prelude::Sink as _, prelude::Stream as _}; use sha1::{Digest as _, Sha1}; use std::{ any::TypeId, - collections::{hash_map, HashMap, HashSet}, + collections::{HashMap, HashSet}, future::Future, mem, sync::Arc, time::Instant, }; +use store::{ReplicaId, Store, Worktree}; use surf::StatusCode; use tide::log; use tide::{ @@ -30,8 +33,6 @@ use zrpc::{ Connection, ConnectionId, Peer, TypedEnvelope, }; -type ReplicaId = u16; - type MessageHandler = Box< dyn Send + Sync @@ -40,46 +41,12 @@ type MessageHandler = Box< pub struct Server { peer: Arc, - state: RwLock, + store: RwLock, app_state: Arc, handlers: HashMap, notifications: Option>, } -#[derive(Default)] -struct ServerState { - connections: HashMap, - connections_by_user_id: HashMap>, - worktrees: HashMap, - visible_worktrees_by_user_id: HashMap>, - channels: HashMap, - next_worktree_id: u64, -} - -struct ConnectionState { - user_id: UserId, - worktrees: HashSet, - channels: HashSet, -} - -struct Worktree { - host_connection_id: ConnectionId, - collaborator_user_ids: Vec, - root_name: String, - share: Option, -} - -struct WorktreeShare { - guest_connection_ids: HashMap, - active_replica_ids: HashSet, - entries: HashMap, -} - -#[derive(Default)] -struct Channel { - connection_ids: HashSet, -} - const MESSAGE_COUNT_PER_PAGE: usize = 100; const MAX_MESSAGE_LEN: usize = 1024; @@ -92,7 +59,7 @@ impl Server { let mut server = Self { peer, app_state, - state: Default::default(), + store: 
Default::default(), handlers: Default::default(), notifications, }; @@ -100,7 +67,7 @@ impl Server { server .add_handler(Server::ping) .add_handler(Server::open_worktree) - .add_handler(Server::handle_close_worktree) + .add_handler(Server::close_worktree) .add_handler(Server::share_worktree) .add_handler(Server::unshare_worktree) .add_handler(Server::join_worktree) @@ -149,7 +116,10 @@ impl Server { async move { let (connection_id, handle_io, mut incoming_rx) = this.peer.add_connection(connection).await; - this.add_connection(connection_id, user_id).await; + this.store + .write() + .await + .add_connection(connection_id, user_id); if let Err(err) = this.update_collaborators_for_users(&[user_id]).await { log::error!("error updating collaborators for {:?}: {}", user_id, err); } @@ -197,61 +167,40 @@ impl Server { } } - async fn sign_out(self: &Arc, connection_id: zrpc::ConnectionId) -> tide::Result<()> { + async fn sign_out(self: &Arc, connection_id: ConnectionId) -> tide::Result<()> { self.peer.disconnect(connection_id).await; - self.remove_connection(connection_id).await?; - Ok(()) - } + let removed_connection = self.store.write().await.remove_connection(connection_id)?; - // Add a new connection associated with a given user. - async fn add_connection(&self, connection_id: ConnectionId, user_id: UserId) { - let mut state = self.state.write().await; - state.connections.insert( - connection_id, - ConnectionState { - user_id, - worktrees: Default::default(), - channels: Default::default(), - }, - ); - state - .connections_by_user_id - .entry(user_id) - .or_default() - .insert(connection_id); - } - - // Remove the given connection and its association with any worktrees. 
- async fn remove_connection( - self: &Arc, - connection_id: ConnectionId, - ) -> tide::Result<()> { - let mut worktree_ids = Vec::new(); - let mut state = self.state.write().await; - if let Some(connection) = state.connections.remove(&connection_id) { - worktree_ids = connection.worktrees.into_iter().collect(); - - for channel_id in connection.channels { - if let Some(channel) = state.channels.get_mut(&channel_id) { - channel.connection_ids.remove(&connection_id); - } - } - - let user_connections = state - .connections_by_user_id - .get_mut(&connection.user_id) - .unwrap(); - user_connections.remove(&connection_id); - if user_connections.is_empty() { - state.connections_by_user_id.remove(&connection.user_id); + for (worktree_id, worktree) in removed_connection.hosted_worktrees { + if let Some(share) = worktree.share { + broadcast( + connection_id, + share.guest_connection_ids.keys().copied().collect(), + |conn_id| { + self.peer + .send(conn_id, proto::UnshareWorktree { worktree_id }) + }, + ) + .await?; } } - drop(state); - for worktree_id in worktree_ids { - self.close_worktree(worktree_id, connection_id).await?; + for (worktree_id, peer_ids) in removed_connection.guest_worktree_ids { + broadcast(connection_id, peer_ids, |conn_id| { + self.peer.send( + conn_id, + proto::RemovePeer { + worktree_id, + peer_id: connection_id.0, + }, + ) + }) + .await?; } + self.update_collaborators_for_users(removed_connection.collaborator_ids.iter()) + .await; + Ok(()) } @@ -266,7 +215,7 @@ impl Server { ) -> tide::Result<()> { let receipt = request.receipt(); let host_user_id = self - .state + .store .read() .await .user_id_for_connection(request.sender_id)?; @@ -289,7 +238,7 @@ impl Server { } let collaborator_user_ids = collaborator_user_ids.into_iter().collect::>(); - let worktree_id = self.state.write().await.add_worktree(Worktree { + let worktree_id = self.store.write().await.add_worktree(Worktree { host_connection_id: request.sender_id, collaborator_user_ids: 
collaborator_user_ids.clone(), root_name: request.payload.root_name, @@ -305,6 +254,33 @@ impl Server { Ok(()) } + async fn close_worktree( + self: Arc, + request: TypedEnvelope, + ) -> tide::Result<()> { + let worktree_id = request.payload.worktree_id; + let worktree = self + .store + .write() + .await + .remove_worktree(worktree_id, request.sender_id)?; + + if let Some(share) = worktree.share { + broadcast( + request.sender_id, + share.guest_connection_ids.keys().copied().collect(), + |conn_id| { + self.peer + .send(conn_id, proto::UnshareWorktree { worktree_id }) + }, + ) + .await?; + } + self.update_collaborators_for_users(&worktree.collaborator_user_ids) + .await?; + Ok(()) + } + async fn share_worktree( self: Arc, mut request: TypedEnvelope, @@ -319,16 +295,12 @@ impl Server { .map(|entry| (entry.id, entry)) .collect(); - let mut state = self.state.write().await; - if let Some(worktree) = state.worktrees.get_mut(&worktree.id) { - worktree.share = Some(WorktreeShare { - guest_connection_ids: Default::default(), - active_replica_ids: Default::default(), - entries, - }); - let collaborator_user_ids = worktree.collaborator_user_ids.clone(); - - drop(state); + if let Some(collaborator_user_ids) = + self.store + .write() + .await + .share_worktree(worktree.id, request.sender_id, entries) + { self.peer .respond(request.receipt(), proto::ShareWorktreeResponse {}) .await?; @@ -352,26 +324,11 @@ impl Server { request: TypedEnvelope, ) -> tide::Result<()> { let worktree_id = request.payload.worktree_id; - - let connection_ids; - let collaborator_user_ids; - { - let mut state = self.state.write().await; - let worktree = state.write_worktree(worktree_id, request.sender_id)?; - if worktree.host_connection_id != request.sender_id { - return Err(anyhow!("no such worktree"))?; - } - - connection_ids = worktree.connection_ids(); - collaborator_user_ids = worktree.collaborator_user_ids.clone(); - - worktree.share.take(); - for connection_id in &connection_ids { - if let 
Some(connection) = state.connections.get_mut(connection_id) { - connection.worktrees.remove(&worktree_id); - } - } - } + let (connection_ids, collaborator_user_ids) = self + .store + .write() + .await + .unshare_worktree(worktree_id, request.sender_id)?; broadcast(request.sender_id, connection_ids, |conn_id| { self.peer @@ -390,7 +347,7 @@ impl Server { ) -> tide::Result<()> { let worktree_id = request.payload.worktree_id; let user_id = self - .state + .store .read() .await .user_id_for_connection(request.sender_id)?; @@ -398,7 +355,7 @@ impl Server { let response; let connection_ids; let collaborator_user_ids; - let mut state = self.state.write().await; + let mut state = self.store.write().await; match state.join_worktree(request.sender_id, user_id, worktree_id) { Ok((peer_replica_id, worktree)) => { let share = worktree.share()?; @@ -462,48 +419,17 @@ impl Server { Ok(()) } - async fn handle_close_worktree( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result<()> { - self.close_worktree(request.payload.worktree_id, request.sender_id) - .await - } - - async fn close_worktree( + async fn leave_worktree( self: &Arc, worktree_id: u64, sender_conn_id: ConnectionId, ) -> tide::Result<()> { - let connection_ids; - let collaborator_user_ids; - let mut is_host = false; - let mut is_guest = false; + if let Some((connection_ids, collaborator_ids)) = self + .store + .write() + .await + .leave_worktree(sender_conn_id, worktree_id) { - let mut state = self.state.write().await; - let worktree = state.write_worktree(worktree_id, sender_conn_id)?; - connection_ids = worktree.connection_ids(); - collaborator_user_ids = worktree.collaborator_user_ids.clone(); - - if worktree.host_connection_id == sender_conn_id { - is_host = true; - state.remove_worktree(worktree_id); - } else { - let share = worktree.share_mut()?; - if let Some(replica_id) = share.guest_connection_ids.remove(&sender_conn_id) { - is_guest = true; - share.active_replica_ids.remove(&replica_id); - } - } - } - - 
if is_host { - broadcast(sender_conn_id, connection_ids, |conn_id| { - self.peer - .send(conn_id, proto::UnshareWorktree { worktree_id }) - }) - .await?; - } else if is_guest { broadcast(sender_conn_id, connection_ids, |conn_id| { self.peer.send( conn_id, @@ -513,10 +439,10 @@ impl Server { }, ) }) - .await? - } - self.update_collaborators_for_users(&collaborator_user_ids) .await?; + self.update_collaborators_for_users(&collaborator_ids) + .await?; + } Ok(()) } @@ -524,22 +450,19 @@ impl Server { self: Arc, request: TypedEnvelope, ) -> tide::Result<()> { - { - let mut state = self.state.write().await; - let worktree = state.write_worktree(request.payload.worktree_id, request.sender_id)?; - let share = worktree.share_mut()?; - - for entry_id in &request.payload.removed_entries { - share.entries.remove(&entry_id); - } - - for entry in &request.payload.updated_entries { - share.entries.insert(entry.id, entry.clone()); - } - } + let connection_ids = self.store.write().await.update_worktree( + request.sender_id, + request.payload.worktree_id, + &request.payload.removed_entries, + &request.payload.updated_entries, + )?; + + broadcast(request.sender_id, connection_ids, |connection_id| { + self.peer + .forward_send(request.sender_id, connection_id, request.payload.clone()) + }) + .await?; - self.broadcast_in_worktree(request.payload.worktree_id, &request) - .await?; Ok(()) } @@ -548,14 +471,11 @@ impl Server { request: TypedEnvelope, ) -> tide::Result<()> { let receipt = request.receipt(); - let worktree_id = request.payload.worktree_id; let host_connection_id = self - .state + .store .read() .await - .read_worktree(worktree_id, request.sender_id)? 
- .host_connection_id; - + .worktree_host_connection_id(request.sender_id, request.payload.worktree_id)?; let response = self .peer .forward_request(request.sender_id, host_connection_id, request.payload) @@ -569,16 +489,13 @@ impl Server { request: TypedEnvelope, ) -> tide::Result<()> { let host_connection_id = self - .state + .store .read() .await - .read_worktree(request.payload.worktree_id, request.sender_id)? - .host_connection_id; - + .worktree_host_connection_id(request.sender_id, request.payload.worktree_id)?; self.peer .forward_send(request.sender_id, host_connection_id, request.payload) .await?; - Ok(()) } @@ -589,15 +506,11 @@ impl Server { let host; let guests; { - let state = self.state.read().await; - let worktree = state.read_worktree(request.payload.worktree_id, request.sender_id)?; - host = worktree.host_connection_id; - guests = worktree - .share()? - .guest_connection_ids - .keys() - .copied() - .collect::>(); + let state = self.store.read().await; + host = state + .worktree_host_connection_id(request.sender_id, request.payload.worktree_id)?; + guests = state + .worktree_guest_connection_ids(request.sender_id, request.payload.worktree_id)?; } let sender = request.sender_id; @@ -627,8 +540,18 @@ impl Server { self: Arc, request: TypedEnvelope, ) -> tide::Result<()> { - self.broadcast_in_worktree(request.payload.worktree_id, &request) - .await?; + broadcast( + request.sender_id, + self.store + .read() + .await + .worktree_connection_ids(request.sender_id, request.payload.worktree_id)?, + |connection_id| { + self.peer + .forward_send(request.sender_id, connection_id, request.payload.clone()) + }, + ) + .await?; self.peer.respond(request.receipt(), proto::Ack {}).await?; Ok(()) } @@ -637,8 +560,19 @@ impl Server { self: Arc, request: TypedEnvelope, ) -> tide::Result<()> { - self.broadcast_in_worktree(request.payload.worktree_id, &request) - .await + broadcast( + request.sender_id, + self.store + .read() + .await + 
.worktree_connection_ids(request.sender_id, request.payload.worktree_id)?, + |connection_id| { + self.peer + .forward_send(request.sender_id, connection_id, request.payload.clone()) + }, + ) + .await?; + Ok(()) } async fn get_channels( @@ -646,7 +580,7 @@ impl Server { request: TypedEnvelope, ) -> tide::Result<()> { let user_id = self - .state + .store .read() .await .user_id_for_connection(request.sender_id)?; @@ -698,45 +632,10 @@ impl Server { ) -> tide::Result<()> { let mut send_futures = Vec::new(); - let state = self.state.read().await; + let state = self.store.read().await; for user_id in user_ids { - let mut collaborators = HashMap::new(); - for worktree_id in state - .visible_worktrees_by_user_id - .get(&user_id) - .unwrap_or(&HashSet::new()) - { - let worktree = &state.worktrees[worktree_id]; - - let mut guests = HashSet::new(); - if let Ok(share) = worktree.share() { - for guest_connection_id in share.guest_connection_ids.keys() { - let user_id = state - .user_id_for_connection(*guest_connection_id) - .context("stale worktree guest connection")?; - guests.insert(user_id.to_proto()); - } - } - - let host_user_id = state - .user_id_for_connection(worktree.host_connection_id) - .context("stale worktree host connection")?; - let host = - collaborators - .entry(host_user_id) - .or_insert_with(|| proto::Collaborator { - user_id: host_user_id.to_proto(), - worktrees: Vec::new(), - }); - host.worktrees.push(proto::WorktreeMetadata { - root_name: worktree.root_name.clone(), - is_shared: worktree.share().is_ok(), - participants: guests.into_iter().collect(), - }); - } - - let collaborators = collaborators.into_values().collect::>(); - for connection_id in state.user_connection_ids(*user_id) { + let collaborators = state.collaborators_for_user(*user_id); + for connection_id in state.connection_ids_for_user(*user_id) { send_futures.push(self.peer.send( connection_id, proto::UpdateCollaborators { @@ -757,7 +656,7 @@ impl Server { request: TypedEnvelope, ) -> 
tide::Result<()> { let user_id = self - .state + .store .read() .await .user_id_for_connection(request.sender_id)?; @@ -771,7 +670,7 @@ impl Server { Err(anyhow!("access denied"))?; } - self.state + self.store .write() .await .join_channel(request.sender_id, channel_id); @@ -806,7 +705,7 @@ impl Server { request: TypedEnvelope, ) -> tide::Result<()> { let user_id = self - .state + .store .read() .await .user_id_for_connection(request.sender_id)?; @@ -820,7 +719,7 @@ impl Server { Err(anyhow!("access denied"))?; } - self.state + self.store .write() .await .leave_channel(request.sender_id, channel_id); @@ -837,10 +736,10 @@ impl Server { let user_id; let connection_ids; { - let state = self.state.read().await; + let state = self.store.read().await; user_id = state.user_id_for_connection(request.sender_id)?; - if let Some(channel) = state.channels.get(&channel_id) { - connection_ids = channel.connection_ids(); + if let Some(ids) = state.channel_connection_ids(channel_id) { + connection_ids = ids; } else { return Ok(()); } @@ -925,7 +824,7 @@ impl Server { request: TypedEnvelope, ) -> tide::Result<()> { let user_id = self - .state + .store .read() .await .user_id_for_connection(request.sender_id)?; @@ -968,27 +867,6 @@ impl Server { .await?; Ok(()) } - - async fn broadcast_in_worktree( - &self, - worktree_id: u64, - message: &TypedEnvelope, - ) -> tide::Result<()> { - let connection_ids = self - .state - .read() - .await - .read_worktree(worktree_id, message.sender_id)? 
- .connection_ids(); - - broadcast(message.sender_id, connection_ids, |conn_id| { - self.peer - .forward_send(message.sender_id, conn_id, message.payload.clone()) - }) - .await?; - - Ok(()) - } } pub async fn broadcast( @@ -1008,292 +886,6 @@ where Ok(()) } -impl ServerState { - fn join_channel(&mut self, connection_id: ConnectionId, channel_id: ChannelId) { - if let Some(connection) = self.connections.get_mut(&connection_id) { - connection.channels.insert(channel_id); - self.channels - .entry(channel_id) - .or_default() - .connection_ids - .insert(connection_id); - } - } - - fn leave_channel(&mut self, connection_id: ConnectionId, channel_id: ChannelId) { - if let Some(connection) = self.connections.get_mut(&connection_id) { - connection.channels.remove(&channel_id); - if let hash_map::Entry::Occupied(mut entry) = self.channels.entry(channel_id) { - entry.get_mut().connection_ids.remove(&connection_id); - if entry.get_mut().connection_ids.is_empty() { - entry.remove(); - } - } - } - } - - fn user_id_for_connection(&self, connection_id: ConnectionId) -> tide::Result { - Ok(self - .connections - .get(&connection_id) - .ok_or_else(|| anyhow!("unknown connection"))? 
- .user_id) - } - - fn user_connection_ids<'a>( - &'a self, - user_id: UserId, - ) -> impl 'a + Iterator { - self.connections_by_user_id - .get(&user_id) - .into_iter() - .flatten() - .copied() - } - - // Add the given connection as a guest of the given worktree - fn join_worktree( - &mut self, - connection_id: ConnectionId, - user_id: UserId, - worktree_id: u64, - ) -> tide::Result<(ReplicaId, &Worktree)> { - let connection = self - .connections - .get_mut(&connection_id) - .ok_or_else(|| anyhow!("no such connection"))?; - let worktree = self - .worktrees - .get_mut(&worktree_id) - .ok_or_else(|| anyhow!("no such worktree"))?; - if !worktree.collaborator_user_ids.contains(&user_id) { - Err(anyhow!("no such worktree"))?; - } - - let share = worktree.share_mut()?; - connection.worktrees.insert(worktree_id); - - let mut replica_id = 1; - while share.active_replica_ids.contains(&replica_id) { - replica_id += 1; - } - share.active_replica_ids.insert(replica_id); - share.guest_connection_ids.insert(connection_id, replica_id); - return Ok((replica_id, worktree)); - } - - fn read_worktree( - &self, - worktree_id: u64, - connection_id: ConnectionId, - ) -> tide::Result<&Worktree> { - let worktree = self - .worktrees - .get(&worktree_id) - .ok_or_else(|| anyhow!("worktree not found"))?; - - if worktree.host_connection_id == connection_id - || worktree - .share()? - .guest_connection_ids - .contains_key(&connection_id) - { - Ok(worktree) - } else { - Err(anyhow!( - "{} is not a member of worktree {}", - connection_id, - worktree_id - ))? 
- } - } - - fn write_worktree( - &mut self, - worktree_id: u64, - connection_id: ConnectionId, - ) -> tide::Result<&mut Worktree> { - let worktree = self - .worktrees - .get_mut(&worktree_id) - .ok_or_else(|| anyhow!("worktree not found"))?; - - if worktree.host_connection_id == connection_id - || worktree.share.as_ref().map_or(false, |share| { - share.guest_connection_ids.contains_key(&connection_id) - }) - { - Ok(worktree) - } else { - Err(anyhow!( - "{} is not a member of worktree {}", - connection_id, - worktree_id - ))? - } - } - - fn add_worktree(&mut self, worktree: Worktree) -> u64 { - let worktree_id = self.next_worktree_id; - for collaborator_user_id in &worktree.collaborator_user_ids { - self.visible_worktrees_by_user_id - .entry(*collaborator_user_id) - .or_default() - .insert(worktree_id); - } - self.next_worktree_id += 1; - if let Some(connection) = self.connections.get_mut(&worktree.host_connection_id) { - connection.worktrees.insert(worktree_id); - } - self.worktrees.insert(worktree_id, worktree); - - #[cfg(test)] - self.check_invariants(); - - worktree_id - } - - fn remove_worktree(&mut self, worktree_id: u64) { - let worktree = self.worktrees.remove(&worktree_id).unwrap(); - if let Some(connection) = self.connections.get_mut(&worktree.host_connection_id) { - connection.worktrees.remove(&worktree_id); - } - if let Some(share) = worktree.share { - for connection_id in share.guest_connection_ids.keys() { - if let Some(connection) = self.connections.get_mut(connection_id) { - connection.worktrees.remove(&worktree_id); - } - } - } - for collaborator_user_id in worktree.collaborator_user_ids { - if let Some(visible_worktrees) = self - .visible_worktrees_by_user_id - .get_mut(&collaborator_user_id) - { - visible_worktrees.remove(&worktree_id); - } - } - - #[cfg(test)] - self.check_invariants(); - } - - #[cfg(test)] - fn check_invariants(&self) { - for (connection_id, connection) in &self.connections { - for worktree_id in &connection.worktrees { - let 
worktree = &self.worktrees.get(&worktree_id).unwrap(); - if worktree.host_connection_id != *connection_id { - assert!(worktree - .share() - .unwrap() - .guest_connection_ids - .contains_key(connection_id)); - } - } - for channel_id in &connection.channels { - let channel = self.channels.get(channel_id).unwrap(); - assert!(channel.connection_ids.contains(connection_id)); - } - assert!(self - .connections_by_user_id - .get(&connection.user_id) - .unwrap() - .contains(connection_id)); - } - - for (user_id, connection_ids) in &self.connections_by_user_id { - for connection_id in connection_ids { - assert_eq!( - self.connections.get(connection_id).unwrap().user_id, - *user_id - ); - } - } - - for (worktree_id, worktree) in &self.worktrees { - let host_connection = self.connections.get(&worktree.host_connection_id).unwrap(); - assert!(host_connection.worktrees.contains(worktree_id)); - - for collaborator_id in &worktree.collaborator_user_ids { - let visible_worktree_ids = self - .visible_worktrees_by_user_id - .get(collaborator_id) - .unwrap(); - assert!(visible_worktree_ids.contains(worktree_id)); - } - - if let Some(share) = &worktree.share { - for guest_connection_id in share.guest_connection_ids.keys() { - let guest_connection = self.connections.get(guest_connection_id).unwrap(); - assert!(guest_connection.worktrees.contains(worktree_id)); - } - assert_eq!( - share.active_replica_ids.len(), - share.guest_connection_ids.len(), - ); - assert_eq!( - share.active_replica_ids, - share - .guest_connection_ids - .values() - .copied() - .collect::>(), - ); - } - } - - for (user_id, visible_worktree_ids) in &self.visible_worktrees_by_user_id { - for worktree_id in visible_worktree_ids { - let worktree = self.worktrees.get(worktree_id).unwrap(); - assert!(worktree.collaborator_user_ids.contains(user_id)); - } - } - - for (channel_id, channel) in &self.channels { - for connection_id in &channel.connection_ids { - let connection = self.connections.get(connection_id).unwrap(); - 
assert!(connection.channels.contains(channel_id)); - } - } - } -} - -impl Worktree { - pub fn connection_ids(&self) -> Vec { - if let Some(share) = &self.share { - share - .guest_connection_ids - .keys() - .copied() - .chain(Some(self.host_connection_id)) - .collect() - } else { - vec![self.host_connection_id] - } - } - - fn share(&self) -> tide::Result<&WorktreeShare> { - Ok(self - .share - .as_ref() - .ok_or_else(|| anyhow!("worktree is not shared"))?) - } - - fn share_mut(&mut self) -> tide::Result<&mut WorktreeShare> { - Ok(self - .share - .as_mut() - .ok_or_else(|| anyhow!("worktree is not shared"))?) - } -} - -impl Channel { - fn connection_ids(&self) -> Vec { - self.connection_ids.iter().copied().collect() - } -} - pub fn add_routes(app: &mut tide::Server>, rpc: &Arc) { let server = Server::new(app.state().clone(), rpc.clone(), None); app.at("/rpc").with(auth::VerifyToken).get(move |request: Request>| { @@ -2477,16 +2069,16 @@ mod tests { }) } - async fn state<'a>(&'a self) -> RwLockReadGuard<'a, ServerState> { - self.server.state.read().await + async fn state<'a>(&'a self) -> RwLockReadGuard<'a, Store> { + self.server.store.read().await } async fn condition(&mut self, mut predicate: F) where - F: FnMut(&ServerState) -> bool, + F: FnMut(&Store) -> bool, { async_std::future::timeout(Duration::from_millis(500), async { - while !(predicate)(&*self.server.state.read().await) { + while !(predicate)(&*self.server.store.read().await) { self.notifications.recv().await; } }) diff --git a/server/src/rpc/store.rs b/server/src/rpc/store.rs new file mode 100644 index 0000000000000..c7a6c2b166d71 --- /dev/null +++ b/server/src/rpc/store.rs @@ -0,0 +1,574 @@ +use crate::db::{ChannelId, MessageId, UserId}; +use crate::errors::TideResultExt; +use anyhow::anyhow; +use std::collections::{hash_map, HashMap, HashSet}; +use zrpc::{proto, ConnectionId}; + +#[derive(Default)] +pub struct Store { + connections: HashMap, + connections_by_user_id: HashMap>, + worktrees: HashMap, + 
visible_worktrees_by_user_id: HashMap>, + channels: HashMap, + next_worktree_id: u64, +} + +struct ConnectionState { + user_id: UserId, + worktrees: HashSet, + channels: HashSet, +} + +pub struct Worktree { + pub host_connection_id: ConnectionId, + pub collaborator_user_ids: Vec, + pub root_name: String, + pub share: Option, +} + +struct WorktreeShare { + pub guest_connection_ids: HashMap, + pub active_replica_ids: HashSet, + pub entries: HashMap, +} + +#[derive(Default)] +struct Channel { + connection_ids: HashSet, +} + +pub type ReplicaId = u16; + +#[derive(Default)] +pub struct RemovedConnectionState { + pub hosted_worktrees: HashMap, + pub guest_worktree_ids: HashMap>, + pub collaborator_ids: HashSet, +} + +impl Store { + pub fn add_connection(&mut self, connection_id: ConnectionId, user_id: UserId) { + self.connections.insert( + connection_id, + ConnectionState { + user_id, + worktrees: Default::default(), + channels: Default::default(), + }, + ); + self.connections_by_user_id + .entry(user_id) + .or_default() + .insert(connection_id); + } + + pub fn remove_connection( + &mut self, + connection_id: ConnectionId, + ) -> tide::Result { + let connection = if let Some(connection) = self.connections.get(&connection_id) { + connection + } else { + return Err(anyhow!("no such connection"))?; + }; + + for channel_id in connection.channels { + if let Some(channel) = self.channels.get_mut(&channel_id) { + channel.connection_ids.remove(&connection_id); + } + } + + let user_connections = self + .connections_by_user_id + .get_mut(&connection.user_id) + .unwrap(); + user_connections.remove(&connection_id); + if user_connections.is_empty() { + self.connections_by_user_id.remove(&connection.user_id); + } + + let mut result = RemovedConnectionState::default(); + for worktree_id in connection.worktrees { + if let Ok(worktree) = self.remove_worktree(worktree_id, connection_id) { + result.hosted_worktrees.insert(worktree_id, worktree); + result + .collaborator_ids + 
.extend(worktree.collaborator_user_ids.iter().copied()); + } else { + if let Some(worktree) = self.worktrees.get(&worktree_id) { + result + .guest_worktree_ids + .insert(worktree_id, worktree.connection_ids()); + result + .collaborator_ids + .extend(worktree.collaborator_user_ids.iter().copied()); + } + } + } + + Ok(result) + } + + pub fn join_channel(&mut self, connection_id: ConnectionId, channel_id: ChannelId) { + if let Some(connection) = self.connections.get_mut(&connection_id) { + connection.channels.insert(channel_id); + self.channels + .entry(channel_id) + .or_default() + .connection_ids + .insert(connection_id); + } + } + + pub fn leave_channel(&mut self, connection_id: ConnectionId, channel_id: ChannelId) { + if let Some(connection) = self.connections.get_mut(&connection_id) { + connection.channels.remove(&channel_id); + if let hash_map::Entry::Occupied(mut entry) = self.channels.entry(channel_id) { + entry.get_mut().connection_ids.remove(&connection_id); + if entry.get_mut().connection_ids.is_empty() { + entry.remove(); + } + } + } + } + + pub fn user_id_for_connection(&self, connection_id: ConnectionId) -> tide::Result { + Ok(self + .connections + .get(&connection_id) + .ok_or_else(|| anyhow!("unknown connection"))? 
+ .user_id) + } + + pub fn connection_ids_for_user<'a>( + &'a self, + user_id: UserId, + ) -> impl 'a + Iterator { + self.connections_by_user_id + .get(&user_id) + .into_iter() + .flatten() + .copied() + } + + pub fn collaborators_for_user(&self, user_id: UserId) -> Vec { + let mut collaborators = HashMap::new(); + for worktree_id in self + .visible_worktrees_by_user_id + .get(&user_id) + .unwrap_or(&HashSet::new()) + { + let worktree = &self.worktrees[worktree_id]; + + let mut guests = HashSet::new(); + if let Ok(share) = worktree.share() { + for guest_connection_id in share.guest_connection_ids.keys() { + if let Ok(user_id) = self.user_id_for_connection(*guest_connection_id) { + guests.insert(user_id.to_proto()); + } + } + } + + if let Ok(host_user_id) = self + .user_id_for_connection(worktree.host_connection_id) + .context("stale worktree host connection") + { + let host = + collaborators + .entry(host_user_id) + .or_insert_with(|| proto::Collaborator { + user_id: host_user_id.to_proto(), + worktrees: Vec::new(), + }); + host.worktrees.push(proto::WorktreeMetadata { + root_name: worktree.root_name.clone(), + is_shared: worktree.share().is_ok(), + participants: guests.into_iter().collect(), + }); + } + } + + collaborators.into_values().collect() + } + + pub fn add_worktree(&mut self, worktree: Worktree) -> u64 { + let worktree_id = self.next_worktree_id; + for collaborator_user_id in &worktree.collaborator_user_ids { + self.visible_worktrees_by_user_id + .entry(*collaborator_user_id) + .or_default() + .insert(worktree_id); + } + self.next_worktree_id += 1; + if let Some(connection) = self.connections.get_mut(&worktree.host_connection_id) { + connection.worktrees.insert(worktree_id); + } + self.worktrees.insert(worktree_id, worktree); + + #[cfg(test)] + self.check_invariants(); + + worktree_id + } + + pub fn remove_worktree( + &mut self, + worktree_id: u64, + acting_connection_id: ConnectionId, + ) -> tide::Result { + let worktree = if let 
hash_map::Entry::Occupied(e) = self.worktrees.entry(worktree_id) { + if e.get().host_connection_id != acting_connection_id { + Err(anyhow!("not your worktree"))?; + } + e.remove() + } else { + return Err(anyhow!("no such worktree"))?; + }; + + if let Some(connection) = self.connections.get_mut(&worktree.host_connection_id) { + connection.worktrees.remove(&worktree_id); + } + + if let Some(share) = worktree.share { + for connection_id in share.guest_connection_ids.keys() { + if let Some(connection) = self.connections.get_mut(connection_id) { + connection.worktrees.remove(&worktree_id); + } + } + } + + for collaborator_user_id in worktree.collaborator_user_ids { + if let Some(visible_worktrees) = self + .visible_worktrees_by_user_id + .get_mut(&collaborator_user_id) + { + visible_worktrees.remove(&worktree_id); + } + } + + #[cfg(test)] + self.check_invariants(); + + Ok(worktree) + } + + pub fn share_worktree( + &mut self, + worktree_id: u64, + connection_id: ConnectionId, + entries: HashMap, + ) -> Option> { + if let Some(worktree) = self.worktrees.get_mut(&worktree_id) { + if worktree.host_connection_id == connection_id { + worktree.share = Some(WorktreeShare { + guest_connection_ids: Default::default(), + active_replica_ids: Default::default(), + entries, + }); + return Some(worktree.collaborator_user_ids.clone()); + } + } + None + } + + pub fn unshare_worktree( + &mut self, + worktree_id: u64, + acting_connection_id: ConnectionId, + ) -> tide::Result<(Vec, Vec)> { + let worktree = if let Some(worktree) = self.worktrees.get_mut(&worktree_id) { + worktree + } else { + return Err(anyhow!("no such worktree"))?; + }; + + if worktree.host_connection_id != acting_connection_id { + return Err(anyhow!("not your worktree"))?; + } + + let connection_ids = worktree.connection_ids(); + + if let Some(share) = worktree.share.take() { + for connection_id in &connection_ids { + if let Some(connection) = self.connections.get_mut(connection_id) { + 
connection.worktrees.remove(&worktree_id); + } + } + Ok((connection_ids, worktree.collaborator_user_ids.clone())) + } else { + Err(anyhow!("worktree is not shared"))? + } + } + + pub fn join_worktree( + &mut self, + connection_id: ConnectionId, + user_id: UserId, + worktree_id: u64, + ) -> tide::Result<(ReplicaId, &Worktree)> { + let connection = self + .connections + .get_mut(&connection_id) + .ok_or_else(|| anyhow!("no such connection"))?; + let worktree = self + .worktrees + .get_mut(&worktree_id) + .and_then(|worktree| { + if worktree.collaborator_user_ids.contains(&user_id) { + Some(worktree) + } else { + None + } + }) + .ok_or_else(|| anyhow!("no such worktree"))?; + + let share = worktree.share_mut()?; + connection.worktrees.insert(worktree_id); + + let mut replica_id = 1; + while share.active_replica_ids.contains(&replica_id) { + replica_id += 1; + } + share.active_replica_ids.insert(replica_id); + share.guest_connection_ids.insert(connection_id, replica_id); + return Ok((replica_id, worktree)); + } + + pub fn leave_worktree( + &mut self, + connection_id: ConnectionId, + worktree_id: u64, + ) -> Option<(Vec, Vec)> { + let worktree = self.worktrees.get_mut(&worktree_id)?; + let share = worktree.share.as_mut()?; + let replica_id = share.guest_connection_ids.remove(&connection_id)?; + share.active_replica_ids.remove(&replica_id); + Some(( + worktree.connection_ids(), + worktree.collaborator_user_ids.clone(), + )) + } + + pub fn update_worktree( + &mut self, + connection_id: ConnectionId, + worktree_id: u64, + removed_entries: &[u64], + updated_entries: &[proto::Entry], + ) -> tide::Result> { + let worktree = self.write_worktree(worktree_id, connection_id)?; + let share = worktree.share_mut()?; + for entry_id in removed_entries { + share.entries.remove(&entry_id); + } + for entry in updated_entries { + share.entries.insert(entry.id, entry.clone()); + } + Ok(worktree.connection_ids()) + } + + pub fn worktree_host_connection_id( + &self, + connection_id: 
ConnectionId, + worktree_id: u64, + ) -> tide::Result { + Ok(self + .read_worktree(worktree_id, connection_id)? + .host_connection_id) + } + + pub fn worktree_guest_connection_ids( + &self, + connection_id: ConnectionId, + worktree_id: u64, + ) -> tide::Result> { + Ok(self + .read_worktree(worktree_id, connection_id)? + .share()? + .guest_connection_ids + .keys() + .copied() + .collect()) + } + + pub fn worktree_connection_ids( + &self, + connection_id: ConnectionId, + worktree_id: u64, + ) -> tide::Result> { + Ok(self + .read_worktree(worktree_id, connection_id)? + .connection_ids()) + } + + pub fn channel_connection_ids(&self, channel_id: ChannelId) -> Option> { + Some(self.channels.get(&channel_id)?.connection_ids()) + } + + fn read_worktree( + &self, + worktree_id: u64, + connection_id: ConnectionId, + ) -> tide::Result<&Worktree> { + let worktree = self + .worktrees + .get(&worktree_id) + .ok_or_else(|| anyhow!("worktree not found"))?; + + if worktree.host_connection_id == connection_id + || worktree + .share()? + .guest_connection_ids + .contains_key(&connection_id) + { + Ok(worktree) + } else { + Err(anyhow!( + "{} is not a member of worktree {}", + connection_id, + worktree_id + ))? + } + } + + fn write_worktree( + &mut self, + worktree_id: u64, + connection_id: ConnectionId, + ) -> tide::Result<&mut Worktree> { + let worktree = self + .worktrees + .get_mut(&worktree_id) + .ok_or_else(|| anyhow!("worktree not found"))?; + + if worktree.host_connection_id == connection_id + || worktree.share.as_ref().map_or(false, |share| { + share.guest_connection_ids.contains_key(&connection_id) + }) + { + Ok(worktree) + } else { + Err(anyhow!( + "{} is not a member of worktree {}", + connection_id, + worktree_id + ))? 
+ } + } + + #[cfg(test)] + fn check_invariants(&self) { + for (connection_id, connection) in &self.connections { + for worktree_id in &connection.worktrees { + let worktree = &self.worktrees.get(&worktree_id).unwrap(); + if worktree.host_connection_id != *connection_id { + assert!(worktree + .share() + .unwrap() + .guest_connection_ids + .contains_key(connection_id)); + } + } + for channel_id in &connection.channels { + let channel = self.channels.get(channel_id).unwrap(); + assert!(channel.connection_ids.contains(connection_id)); + } + assert!(self + .connections_by_user_id + .get(&connection.user_id) + .unwrap() + .contains(connection_id)); + } + + for (user_id, connection_ids) in &self.connections_by_user_id { + for connection_id in connection_ids { + assert_eq!( + self.connections.get(connection_id).unwrap().user_id, + *user_id + ); + } + } + + for (worktree_id, worktree) in &self.worktrees { + let host_connection = self.connections.get(&worktree.host_connection_id).unwrap(); + assert!(host_connection.worktrees.contains(worktree_id)); + + for collaborator_id in &worktree.collaborator_user_ids { + let visible_worktree_ids = self + .visible_worktrees_by_user_id + .get(collaborator_id) + .unwrap(); + assert!(visible_worktree_ids.contains(worktree_id)); + } + + if let Some(share) = &worktree.share { + for guest_connection_id in share.guest_connection_ids.keys() { + let guest_connection = self.connections.get(guest_connection_id).unwrap(); + assert!(guest_connection.worktrees.contains(worktree_id)); + } + assert_eq!( + share.active_replica_ids.len(), + share.guest_connection_ids.len(), + ); + assert_eq!( + share.active_replica_ids, + share + .guest_connection_ids + .values() + .copied() + .collect::>(), + ); + } + } + + for (user_id, visible_worktree_ids) in &self.visible_worktrees_by_user_id { + for worktree_id in visible_worktree_ids { + let worktree = self.worktrees.get(worktree_id).unwrap(); + assert!(worktree.collaborator_user_ids.contains(user_id)); + } + } + 
+ for (channel_id, channel) in &self.channels { + for connection_id in &channel.connection_ids { + let connection = self.connections.get(connection_id).unwrap(); + assert!(connection.channels.contains(channel_id)); + } + } + } +} + +impl Worktree { + pub fn connection_ids(&self) -> Vec { + if let Some(share) = &self.share { + share + .guest_connection_ids + .keys() + .copied() + .chain(Some(self.host_connection_id)) + .collect() + } else { + vec![self.host_connection_id] + } + } + + pub fn share(&self) -> tide::Result<&WorktreeShare> { + Ok(self + .share + .as_ref() + .ok_or_else(|| anyhow!("worktree is not shared"))?) + } + + fn share_mut(&mut self) -> tide::Result<&mut WorktreeShare> { + Ok(self + .share + .as_mut() + .ok_or_else(|| anyhow!("worktree is not shared"))?) + } +} + +impl Channel { + fn connection_ids(&self) -> Vec { + self.connection_ids.iter().copied().collect() + } +} diff --git a/zed/src/worktree.rs b/zed/src/worktree.rs index 93be506134121..7c766bc0d7c8a 100644 --- a/zed/src/worktree.rs +++ b/zed/src/worktree.rs @@ -66,21 +66,28 @@ impl Entity for Worktree { type Event = (); fn release(&mut self, cx: &mut MutableAppContext) { - let rpc = match self { - Self::Local(tree) => tree - .remote_id - .borrow() - .map(|remote_id| (tree.rpc.clone(), remote_id)), - Self::Remote(tree) => Some((tree.rpc.clone(), tree.remote_id)), - }; - - if let Some((rpc, worktree_id)) = rpc { - cx.spawn(|_| async move { - if let Err(err) = rpc.send(proto::CloseWorktree { worktree_id }).await { - log::error!("error closing worktree {}: {}", worktree_id, err); + match self { + Self::Local(tree) => { + if let Some(worktree_id) = *tree.remote_id.borrow() { + let rpc = tree.rpc.clone(); + cx.spawn(|_| async move { + if let Err(err) = rpc.send(proto::CloseWorktree { worktree_id }).await { + log::error!("error closing worktree: {}", err); + } + }) + .detach(); } - }) - .detach(); + } + Self::Remote(tree) => { + let rpc = tree.rpc.clone(); + let worktree_id = tree.remote_id; + 
cx.spawn(|_| async move { + if let Err(err) = rpc.send(proto::LeaveWorktree { worktree_id }).await { + log::error!("error closing worktree: {}", err); + } + }) + .detach(); + } } } } diff --git a/zrpc/proto/zed.proto b/zrpc/proto/zed.proto index 074bbe60938c8..340c0751aa325 100644 --- a/zrpc/proto/zed.proto +++ b/zrpc/proto/zed.proto @@ -39,6 +39,7 @@ message Envelope { OpenWorktreeResponse open_worktree_response = 34; UnshareWorktree unshare_worktree = 35; UpdateCollaborators update_collaborators = 36; + LeaveWorktree leave_worktree = 37; } } @@ -75,6 +76,10 @@ message JoinWorktree { uint64 worktree_id = 1; } +message LeaveWorktree { + uint64 worktree_id = 1; +} + message JoinWorktreeResponse { Worktree worktree = 2; uint32 replica_id = 3; diff --git a/zrpc/src/proto.rs b/zrpc/src/proto.rs index f094923af387a..92fca53e28335 100644 --- a/zrpc/src/proto.rs +++ b/zrpc/src/proto.rs @@ -139,6 +139,7 @@ messages!( JoinWorktree, JoinWorktreeResponse, LeaveChannel, + LeaveWorktree, OpenBuffer, OpenBufferResponse, OpenWorktree, From aa671f1041cf9c753630a443285c2043416e5f29 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Mon, 20 Sep 2021 19:44:30 -0600 Subject: [PATCH 20/43] Fix remaining compilation errors --- server/src/rpc.rs | 32 +++++++++++++++++++------------- server/src/rpc/store.rs | 24 ++++++++++++++---------- 2 files changed, 33 insertions(+), 23 deletions(-) diff --git a/server/src/rpc.rs b/server/src/rpc.rs index 009fc9bd1ed8c..f8eae2c831b9a 100644 --- a/server/src/rpc.rs +++ b/server/src/rpc.rs @@ -5,12 +5,11 @@ use super::{ db::{ChannelId, MessageId, UserId}, AppState, }; -use crate::errors::TideResultExt; use anyhow::anyhow; use async_std::{sync::RwLock, task}; use async_tungstenite::{tungstenite::protocol::Role, WebSocketStream}; use futures::{future::BoxFuture, FutureExt}; -use postage::{broadcast, mpsc, prelude::Sink as _, prelude::Stream as _}; +use postage::{mpsc, prelude::Sink as _, prelude::Stream as _}; use sha1::{Digest as _, Sha1}; use std::{ 
any::TypeId, @@ -20,7 +19,7 @@ use std::{ sync::Arc, time::Instant, }; -use store::{ReplicaId, Store, Worktree}; +use store::{Store, Worktree}; use surf::StatusCode; use tide::log; use tide::{ @@ -71,6 +70,7 @@ impl Server { .add_handler(Server::share_worktree) .add_handler(Server::unshare_worktree) .add_handler(Server::join_worktree) + .add_handler(Server::leave_worktree) .add_handler(Server::update_worktree) .add_handler(Server::open_buffer) .add_handler(Server::close_buffer) @@ -199,7 +199,7 @@ impl Server { } self.update_collaborators_for_users(removed_connection.collaborator_ids.iter()) - .await; + .await?; Ok(()) } @@ -420,22 +420,24 @@ impl Server { } async fn leave_worktree( - self: &Arc, - worktree_id: u64, - sender_conn_id: ConnectionId, + self: Arc, + request: TypedEnvelope, ) -> tide::Result<()> { + let sender_id = request.sender_id; + let worktree_id = request.payload.worktree_id; + if let Some((connection_ids, collaborator_ids)) = self .store .write() .await - .leave_worktree(sender_conn_id, worktree_id) + .leave_worktree(sender_id, worktree_id) { - broadcast(sender_conn_id, connection_ids, |conn_id| { + broadcast(sender_id, connection_ids, |conn_id| { self.peer.send( conn_id, proto::RemovePeer { worktree_id, - peer_id: sender_conn_id.0, + peer_id: sender_id.0, }, ) }) @@ -1550,19 +1552,23 @@ mod tests { .await; assert_eq!( - server.state().await.channels[&channel_id] + server + .state() + .await + .channel(channel_id) + .unwrap() .connection_ids .len(), 2 ); cx_b.update(|_| drop(channel_b)); server - .condition(|state| state.channels[&channel_id].connection_ids.len() == 1) + .condition(|state| state.channel(channel_id).unwrap().connection_ids.len() == 1) .await; cx_a.update(|_| drop(channel_a)); server - .condition(|state| !state.channels.contains_key(&channel_id)) + .condition(|state| state.channel(channel_id).is_none()) .await; } diff --git a/server/src/rpc/store.rs b/server/src/rpc/store.rs index c7a6c2b166d71..cbc691a8d71a8 100644 --- 
a/server/src/rpc/store.rs +++ b/server/src/rpc/store.rs @@ -1,4 +1,4 @@ -use crate::db::{ChannelId, MessageId, UserId}; +use crate::db::{ChannelId, UserId}; use crate::errors::TideResultExt; use anyhow::anyhow; use std::collections::{hash_map, HashMap, HashSet}; @@ -27,15 +27,15 @@ pub struct Worktree { pub share: Option, } -struct WorktreeShare { +pub struct WorktreeShare { pub guest_connection_ids: HashMap, pub active_replica_ids: HashSet, pub entries: HashMap, } #[derive(Default)] -struct Channel { - connection_ids: HashSet, +pub struct Channel { + pub connection_ids: HashSet, } pub type ReplicaId = u16; @@ -73,7 +73,7 @@ impl Store { return Err(anyhow!("no such connection"))?; }; - for channel_id in connection.channels { + for channel_id in &connection.channels { if let Some(channel) = self.channels.get_mut(&channel_id) { channel.connection_ids.remove(&connection_id); } @@ -89,12 +89,12 @@ impl Store { } let mut result = RemovedConnectionState::default(); - for worktree_id in connection.worktrees { + for worktree_id in connection.worktrees.clone() { if let Ok(worktree) = self.remove_worktree(worktree_id, connection_id) { - result.hosted_worktrees.insert(worktree_id, worktree); result .collaborator_ids .extend(worktree.collaborator_user_ids.iter().copied()); + result.hosted_worktrees.insert(worktree_id, worktree); } else { if let Some(worktree) = self.worktrees.get(&worktree_id) { result @@ -110,6 +110,10 @@ impl Store { Ok(result) } + pub fn channel(&self, id: ChannelId) -> Option<&Channel> { + self.channels.get(&id) + } + pub fn join_channel(&mut self, connection_id: ConnectionId, channel_id: ChannelId) { if let Some(connection) = self.connections.get_mut(&connection_id) { connection.channels.insert(channel_id); @@ -230,7 +234,7 @@ impl Store { connection.worktrees.remove(&worktree_id); } - if let Some(share) = worktree.share { + if let Some(share) = &worktree.share { for connection_id in share.guest_connection_ids.keys() { if let Some(connection) = 
self.connections.get_mut(connection_id) { connection.worktrees.remove(&worktree_id); @@ -238,7 +242,7 @@ impl Store { } } - for collaborator_user_id in worktree.collaborator_user_ids { + for collaborator_user_id in &worktree.collaborator_user_ids { if let Some(visible_worktrees) = self .visible_worktrees_by_user_id .get_mut(&collaborator_user_id) @@ -289,7 +293,7 @@ impl Store { let connection_ids = worktree.connection_ids(); - if let Some(share) = worktree.share.take() { + if let Some(_) = worktree.share.take() { for connection_id in &connection_ids { if let Some(connection) = self.connections.get_mut(connection_id) { connection.worktrees.remove(&worktree_id); From 1954c6b00e4f9561284625b012f53e176632d28f Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 21 Sep 2021 11:38:50 +0200 Subject: [PATCH 21/43] Replace returned tuples with named structs in `Store` --- server/src/rpc.rs | 21 ++++++++++++--------- server/src/rpc/store.rs | 35 ++++++++++++++++++++++++++--------- 2 files changed, 38 insertions(+), 18 deletions(-) diff --git a/server/src/rpc.rs b/server/src/rpc.rs index f8eae2c831b9a..ec35a27c6f5ca 100644 --- a/server/src/rpc.rs +++ b/server/src/rpc.rs @@ -19,7 +19,7 @@ use std::{ sync::Arc, time::Instant, }; -use store::{Store, Worktree}; +use store::{JoinedWorktree, Store, Worktree}; use surf::StatusCode; use tide::log; use tide::{ @@ -324,18 +324,18 @@ impl Server { request: TypedEnvelope, ) -> tide::Result<()> { let worktree_id = request.payload.worktree_id; - let (connection_ids, collaborator_user_ids) = self + let worktree = self .store .write() .await .unshare_worktree(worktree_id, request.sender_id)?; - broadcast(request.sender_id, connection_ids, |conn_id| { + broadcast(request.sender_id, worktree.connection_ids, |conn_id| { self.peer .send(conn_id, proto::UnshareWorktree { worktree_id }) }) .await?; - self.update_collaborators_for_users(&collaborator_user_ids) + self.update_collaborators_for_users(&worktree.collaborator_ids) .await?; Ok(()) 
@@ -357,7 +357,10 @@ impl Server { let collaborator_user_ids; let mut state = self.store.write().await; match state.join_worktree(request.sender_id, user_id, worktree_id) { - Ok((peer_replica_id, worktree)) => { + Ok(JoinedWorktree { + replica_id, + worktree, + }) => { let share = worktree.share()?; let peer_count = share.guest_connection_ids.len(); let mut peers = Vec::with_capacity(peer_count); @@ -379,7 +382,7 @@ impl Server { root_name: worktree.root_name.clone(), entries: share.entries.values().cloned().collect(), }), - replica_id: peer_replica_id as u32, + replica_id: replica_id as u32, peers, }; connection_ids = worktree.connection_ids(); @@ -426,13 +429,13 @@ impl Server { let sender_id = request.sender_id; let worktree_id = request.payload.worktree_id; - if let Some((connection_ids, collaborator_ids)) = self + if let Some(worktree) = self .store .write() .await .leave_worktree(sender_id, worktree_id) { - broadcast(sender_id, connection_ids, |conn_id| { + broadcast(sender_id, worktree.connection_ids, |conn_id| { self.peer.send( conn_id, proto::RemovePeer { @@ -442,7 +445,7 @@ impl Server { ) }) .await?; - self.update_collaborators_for_users(&collaborator_ids) + self.update_collaborators_for_users(&worktree.collaborator_ids) .await?; } Ok(()) diff --git a/server/src/rpc/store.rs b/server/src/rpc/store.rs index cbc691a8d71a8..8496dbefce7de 100644 --- a/server/src/rpc/store.rs +++ b/server/src/rpc/store.rs @@ -47,6 +47,16 @@ pub struct RemovedConnectionState { pub collaborator_ids: HashSet, } +pub struct JoinedWorktree<'a> { + pub replica_id: ReplicaId, + pub worktree: &'a Worktree, +} + +pub struct WorktreeMetadata { + pub connection_ids: Vec, + pub collaborator_ids: Vec, +} + impl Store { pub fn add_connection(&mut self, connection_id: ConnectionId, user_id: UserId) { self.connections.insert( @@ -110,6 +120,7 @@ impl Store { Ok(result) } + #[cfg(test)] pub fn channel(&self, id: ChannelId) -> Option<&Channel> { self.channels.get(&id) } @@ -280,7 +291,7 @@ 
impl Store { &mut self, worktree_id: u64, acting_connection_id: ConnectionId, - ) -> tide::Result<(Vec, Vec)> { + ) -> tide::Result { let worktree = if let Some(worktree) = self.worktrees.get_mut(&worktree_id) { worktree } else { @@ -299,7 +310,10 @@ impl Store { connection.worktrees.remove(&worktree_id); } } - Ok((connection_ids, worktree.collaborator_user_ids.clone())) + Ok(WorktreeMetadata { + connection_ids, + collaborator_ids: worktree.collaborator_user_ids.clone(), + }) } else { Err(anyhow!("worktree is not shared"))? } @@ -310,7 +324,7 @@ impl Store { connection_id: ConnectionId, user_id: UserId, worktree_id: u64, - ) -> tide::Result<(ReplicaId, &Worktree)> { + ) -> tide::Result { let connection = self .connections .get_mut(&connection_id) @@ -336,22 +350,25 @@ impl Store { } share.active_replica_ids.insert(replica_id); share.guest_connection_ids.insert(connection_id, replica_id); - return Ok((replica_id, worktree)); + Ok(JoinedWorktree { + replica_id, + worktree, + }) } pub fn leave_worktree( &mut self, connection_id: ConnectionId, worktree_id: u64, - ) -> Option<(Vec, Vec)> { + ) -> Option { let worktree = self.worktrees.get_mut(&worktree_id)?; let share = worktree.share.as_mut()?; let replica_id = share.guest_connection_ids.remove(&connection_id)?; share.active_replica_ids.remove(&replica_id); - Some(( - worktree.connection_ids(), - worktree.collaborator_user_ids.clone(), - )) + Some(WorktreeMetadata { + connection_ids: worktree.connection_ids(), + collaborator_ids: worktree.collaborator_user_ids.clone(), + }) } pub fn update_worktree( From 0b11192fe3cbcc81f48cf4c7e1321d2bd8598ed6 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 21 Sep 2021 11:58:31 +0200 Subject: [PATCH 22/43] Remove deadlock and make integration tests pass again --- server/src/rpc.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/server/src/rpc.rs b/server/src/rpc.rs index ec35a27c6f5ca..5aae943b58d4b 100644 --- a/server/src/rpc.rs +++ 
b/server/src/rpc.rs @@ -295,12 +295,12 @@ impl Server { .map(|entry| (entry.id, entry)) .collect(); - if let Some(collaborator_user_ids) = + let collaborator_user_ids = self.store .write() .await - .share_worktree(worktree.id, request.sender_id, entries) - { + .share_worktree(worktree.id, request.sender_id, entries); + if let Some(collaborator_user_ids) = collaborator_user_ids { self.peer .respond(request.receipt(), proto::ShareWorktreeResponse {}) .await?; From d381020a60e5eae6f036240dfd8bc0406eeea371 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 21 Sep 2021 12:19:52 +0200 Subject: [PATCH 23/43] Add `Server::{state,state_mut}` to catch most deadlocks statically --- server/src/rpc.rs | 150 ++++++++++++++++++++++------------------------ 1 file changed, 71 insertions(+), 79 deletions(-) diff --git a/server/src/rpc.rs b/server/src/rpc.rs index 5aae943b58d4b..68f1ff7d82033 100644 --- a/server/src/rpc.rs +++ b/server/src/rpc.rs @@ -112,12 +112,11 @@ impl Server { addr: String, user_id: UserId, ) -> impl Future { - let this = self.clone(); + let mut this = self.clone(); async move { let (connection_id, handle_io, mut incoming_rx) = this.peer.add_connection(connection).await; - this.store - .write() + this.state_mut() .await .add_connection(connection_id, user_id); if let Err(err) = this.update_collaborators_for_users(&[user_id]).await { @@ -167,9 +166,9 @@ impl Server { } } - async fn sign_out(self: &Arc, connection_id: ConnectionId) -> tide::Result<()> { + async fn sign_out(self: &mut Arc, connection_id: ConnectionId) -> tide::Result<()> { self.peer.disconnect(connection_id).await; - let removed_connection = self.store.write().await.remove_connection(connection_id)?; + let removed_connection = self.state_mut().await.remove_connection(connection_id)?; for (worktree_id, worktree) in removed_connection.hosted_worktrees { if let Some(share) = worktree.share { @@ -210,13 +209,12 @@ impl Server { } async fn open_worktree( - self: Arc, + mut self: Arc, request: 
TypedEnvelope, ) -> tide::Result<()> { let receipt = request.receipt(); let host_user_id = self - .store - .read() + .state() .await .user_id_for_connection(request.sender_id)?; @@ -238,7 +236,7 @@ impl Server { } let collaborator_user_ids = collaborator_user_ids.into_iter().collect::>(); - let worktree_id = self.store.write().await.add_worktree(Worktree { + let worktree_id = self.state_mut().await.add_worktree(Worktree { host_connection_id: request.sender_id, collaborator_user_ids: collaborator_user_ids.clone(), root_name: request.payload.root_name, @@ -255,13 +253,12 @@ impl Server { } async fn close_worktree( - self: Arc, + mut self: Arc, request: TypedEnvelope, ) -> tide::Result<()> { let worktree_id = request.payload.worktree_id; let worktree = self - .store - .write() + .state_mut() .await .remove_worktree(worktree_id, request.sender_id)?; @@ -282,7 +279,7 @@ impl Server { } async fn share_worktree( - self: Arc, + mut self: Arc, mut request: TypedEnvelope, ) -> tide::Result<()> { let worktree = request @@ -296,8 +293,7 @@ impl Server { .collect(); let collaborator_user_ids = - self.store - .write() + self.state_mut() .await .share_worktree(worktree.id, request.sender_id, entries); if let Some(collaborator_user_ids) = collaborator_user_ids { @@ -320,13 +316,12 @@ impl Server { } async fn unshare_worktree( - self: Arc, + mut self: Arc, request: TypedEnvelope, ) -> tide::Result<()> { let worktree_id = request.payload.worktree_id; let worktree = self - .store - .write() + .state_mut() .await .unshare_worktree(worktree_id, request.sender_id)?; @@ -342,20 +337,16 @@ impl Server { } async fn join_worktree( - self: Arc, + mut self: Arc, request: TypedEnvelope, ) -> tide::Result<()> { let worktree_id = request.payload.worktree_id; let user_id = self - .store - .read() + .state() .await .user_id_for_connection(request.sender_id)?; - let response; - let connection_ids; - let collaborator_user_ids; - let mut state = self.store.write().await; + let mut state = 
self.state_mut().await; match state.join_worktree(request.sender_id, user_id, worktree_id) { Ok(JoinedWorktree { replica_id, @@ -376,7 +367,7 @@ impl Server { }); } } - response = proto::JoinWorktreeResponse { + let response = proto::JoinWorktreeResponse { worktree: Some(proto::Worktree { id: worktree_id, root_name: worktree.root_name.clone(), @@ -385,10 +376,29 @@ impl Server { replica_id: replica_id as u32, peers, }; - connection_ids = worktree.connection_ids(); - collaborator_user_ids = worktree.collaborator_user_ids.clone(); + let connection_ids = worktree.connection_ids(); + let collaborator_user_ids = worktree.collaborator_user_ids.clone(); + drop(state); + + broadcast(request.sender_id, connection_ids, |conn_id| { + self.peer.send( + conn_id, + proto::AddPeer { + worktree_id, + peer: Some(proto::Peer { + peer_id: request.sender_id.0, + replica_id: response.replica_id, + }), + }, + ) + }) + .await?; + self.peer.respond(request.receipt(), response).await?; + self.update_collaborators_for_users(&collaborator_user_ids) + .await?; } Err(error) => { + drop(state); self.peer .respond_with_error( request.receipt(), @@ -397,44 +407,23 @@ impl Server { }, ) .await?; - return Ok(()); } } - drop(state); - broadcast(request.sender_id, connection_ids, |conn_id| { - self.peer.send( - conn_id, - proto::AddPeer { - worktree_id, - peer: Some(proto::Peer { - peer_id: request.sender_id.0, - replica_id: response.replica_id, - }), - }, - ) - }) - .await?; - self.peer.respond(request.receipt(), response).await?; - self.update_collaborators_for_users(&collaborator_user_ids) - .await?; - Ok(()) } async fn leave_worktree( - self: Arc, + mut self: Arc, request: TypedEnvelope, ) -> tide::Result<()> { let sender_id = request.sender_id; let worktree_id = request.payload.worktree_id; - - if let Some(worktree) = self - .store - .write() + let worktree = self + .state_mut() .await - .leave_worktree(sender_id, worktree_id) - { + .leave_worktree(sender_id, worktree_id); + if let 
Some(worktree) = worktree { broadcast(sender_id, worktree.connection_ids, |conn_id| { self.peer.send( conn_id, @@ -452,10 +441,10 @@ impl Server { } async fn update_worktree( - self: Arc, + mut self: Arc, request: TypedEnvelope, ) -> tide::Result<()> { - let connection_ids = self.store.write().await.update_worktree( + let connection_ids = self.state_mut().await.update_worktree( request.sender_id, request.payload.worktree_id, &request.payload.removed_entries, @@ -477,8 +466,7 @@ impl Server { ) -> tide::Result<()> { let receipt = request.receipt(); let host_connection_id = self - .store - .read() + .state() .await .worktree_host_connection_id(request.sender_id, request.payload.worktree_id)?; let response = self @@ -494,8 +482,7 @@ impl Server { request: TypedEnvelope, ) -> tide::Result<()> { let host_connection_id = self - .store - .read() + .state() .await .worktree_host_connection_id(request.sender_id, request.payload.worktree_id)?; self.peer @@ -511,7 +498,7 @@ impl Server { let host; let guests; { - let state = self.store.read().await; + let state = self.state().await; host = state .worktree_host_connection_id(request.sender_id, request.payload.worktree_id)?; guests = state @@ -547,8 +534,7 @@ impl Server { ) -> tide::Result<()> { broadcast( request.sender_id, - self.store - .read() + self.state() .await .worktree_connection_ids(request.sender_id, request.payload.worktree_id)?, |connection_id| { @@ -585,8 +571,7 @@ impl Server { request: TypedEnvelope, ) -> tide::Result<()> { let user_id = self - .store - .read() + .state() .await .user_id_for_connection(request.sender_id)?; let channels = self.app_state.db.get_accessible_channels(user_id).await?; @@ -637,7 +622,7 @@ impl Server { ) -> tide::Result<()> { let mut send_futures = Vec::new(); - let state = self.store.read().await; + let state = self.state().await; for user_id in user_ids { let collaborators = state.collaborators_for_user(*user_id); for connection_id in state.connection_ids_for_user(*user_id) { @@ 
-657,12 +642,11 @@ impl Server { } async fn join_channel( - self: Arc, + mut self: Arc, request: TypedEnvelope, ) -> tide::Result<()> { let user_id = self - .store - .read() + .state() .await .user_id_for_connection(request.sender_id)?; let channel_id = ChannelId::from_proto(request.payload.channel_id); @@ -675,8 +659,7 @@ impl Server { Err(anyhow!("access denied"))?; } - self.store - .write() + self.state_mut() .await .join_channel(request.sender_id, channel_id); let messages = self @@ -706,12 +689,11 @@ impl Server { } async fn leave_channel( - self: Arc, + mut self: Arc, request: TypedEnvelope, ) -> tide::Result<()> { let user_id = self - .store - .read() + .state() .await .user_id_for_connection(request.sender_id)?; let channel_id = ChannelId::from_proto(request.payload.channel_id); @@ -724,8 +706,7 @@ impl Server { Err(anyhow!("access denied"))?; } - self.store - .write() + self.state_mut() .await .leave_channel(request.sender_id, channel_id); @@ -741,7 +722,7 @@ impl Server { let user_id; let connection_ids; { - let state = self.store.read().await; + let state = self.state().await; user_id = state.user_id_for_connection(request.sender_id)?; if let Some(ids) = state.channel_connection_ids(channel_id) { connection_ids = ids; @@ -829,8 +810,7 @@ impl Server { request: TypedEnvelope, ) -> tide::Result<()> { let user_id = self - .store - .read() + .state() .await .user_id_for_connection(request.sender_id)?; let channel_id = ChannelId::from_proto(request.payload.channel_id); @@ -872,6 +852,18 @@ impl Server { .await?; Ok(()) } + + fn state<'a>( + self: &'a Arc, + ) -> impl Future> { + self.store.read() + } + + fn state_mut<'a>( + self: &'a mut Arc, + ) -> impl Future> { + self.store.write() + } } pub async fn broadcast( From b9d46366ed0f2b97d281b2896f6cc96bdd59c1b2 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 21 Sep 2021 16:20:20 +0200 Subject: [PATCH 24/43] Fix more failing/hanging unit tests --- zed/src/channel.rs | 30 
+++++++++++++++--------------- zed/src/worktree.rs | 24 ++++++++++++++++-------- 2 files changed, 31 insertions(+), 23 deletions(-) diff --git a/zed/src/channel.rs b/zed/src/channel.rs index f042a2e5088ec..f0e7a90c9aed2 100644 --- a/zed/src/channel.rs +++ b/zed/src/channel.rs @@ -615,21 +615,6 @@ mod tests { let channel_list = cx.add_model(|cx| ChannelList::new(user_store, client.clone(), cx)); channel_list.read_with(&cx, |list, _| assert_eq!(list.available_channels(), None)); - let get_users = server.receive::().await.unwrap(); - assert_eq!(get_users.payload.user_ids, vec![5]); - server - .respond( - get_users.receipt(), - proto::GetUsersResponse { - users: vec![proto::User { - id: 5, - github_login: "nathansobo".into(), - avatar_url: "http://avatar.com/nathansobo".into(), - }], - }, - ) - .await; - // Get the available channels. let get_channels = server.receive::().await.unwrap(); server @@ -654,6 +639,21 @@ mod tests { ) }); + let get_users = server.receive::().await.unwrap(); + assert_eq!(get_users.payload.user_ids, vec![5]); + server + .respond( + get_users.receipt(), + proto::GetUsersResponse { + users: vec![proto::User { + id: 5, + github_login: "nathansobo".into(), + avatar_url: "http://avatar.com/nathansobo".into(), + }], + }, + ) + .await; + // Join a channel and populate its existing messages. let channel = channel_list .update(&mut cx, |list, cx| { diff --git a/zed/src/worktree.rs b/zed/src/worktree.rs index 7c766bc0d7c8a..bbf8381784a51 100644 --- a/zed/src/worktree.rs +++ b/zed/src/worktree.rs @@ -2806,8 +2806,11 @@ mod tests { } })); + let user_id = 5; + let mut client = rpc::Client::new(); + let server = FakeServer::for_client(user_id, &mut client, &cx).await; let tree = Worktree::open_local( - rpc::Client::new(), + client, dir.path(), Arc::new(RealFs), Default::default(), @@ -2844,15 +2847,20 @@ mod tests { // Create a remote copy of this worktree. 
let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot()); let worktree_id = 1; - let share_request = tree - .update(&mut cx, |tree, cx| { - tree.as_local().unwrap().share_request(cx) - }) - .await - .unwrap(); + let share_request = tree.update(&mut cx, |tree, cx| { + tree.as_local().unwrap().share_request(cx) + }); + let open_worktree = server.receive::().await.unwrap(); + server + .respond( + open_worktree.receipt(), + proto::OpenWorktreeResponse { worktree_id: 1 }, + ) + .await; + let remote = Worktree::remote( proto::JoinWorktreeResponse { - worktree: share_request.worktree, + worktree: share_request.await.unwrap().worktree, replica_id: 1, peers: Vec::new(), }, From 7c10faeccf9cb1f07c0ad2018e8035ce7d96286e Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 21 Sep 2021 17:21:30 +0200 Subject: [PATCH 25/43] Use more concrete names for `Store` return values Co-Authored-By: Nathan Sobo --- server/src/rpc/store.rs | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/server/src/rpc/store.rs b/server/src/rpc/store.rs index 8496dbefce7de..15d2e6a920bc8 100644 --- a/server/src/rpc/store.rs +++ b/server/src/rpc/store.rs @@ -52,7 +52,12 @@ pub struct JoinedWorktree<'a> { pub worktree: &'a Worktree, } -pub struct WorktreeMetadata { +pub struct UnsharedWorktree { + pub connection_ids: Vec, + pub collaborator_ids: Vec, +} + +pub struct LeftWorktree { pub connection_ids: Vec, pub collaborator_ids: Vec, } @@ -291,7 +296,7 @@ impl Store { &mut self, worktree_id: u64, acting_connection_id: ConnectionId, - ) -> tide::Result { + ) -> tide::Result { let worktree = if let Some(worktree) = self.worktrees.get_mut(&worktree_id) { worktree } else { @@ -310,7 +315,7 @@ impl Store { connection.worktrees.remove(&worktree_id); } } - Ok(WorktreeMetadata { + Ok(UnsharedWorktree { connection_ids, collaborator_ids: worktree.collaborator_user_ids.clone(), }) @@ -360,12 +365,12 @@ impl Store { &mut self, connection_id: ConnectionId, worktree_id: 
u64, - ) -> Option { + ) -> Option { let worktree = self.worktrees.get_mut(&worktree_id)?; let share = worktree.share.as_mut()?; let replica_id = share.guest_connection_ids.remove(&connection_id)?; share.active_replica_ids.remove(&replica_id); - Some(WorktreeMetadata { + Some(LeftWorktree { connection_ids: worktree.connection_ids(), collaborator_ids: worktree.collaborator_user_ids.clone(), }) From f8990b707a4f39f2d90e2a8578e621f49ebbee8f Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 21 Sep 2021 18:13:02 +0200 Subject: [PATCH 26/43] Style people panel Co-Authored-By: Nathan Sobo --- gpui/src/color.rs | 8 ++ gpui/src/elements.rs | 5 +- gpui/src/elements/label.rs | 3 +- gpui/src/elements/line_box.rs | 87 ---------------------- gpui/src/font_cache.rs | 12 +++ gpui/src/fonts.rs | 12 +++ zed/assets/themes/_base.toml | 13 +++- zed/src/people_panel.rs | 135 ++++++++++++++++++++++++++-------- zed/src/theme.rs | 9 ++- 9 files changed, 158 insertions(+), 126 deletions(-) delete mode 100644 gpui/src/elements/line_box.rs diff --git a/gpui/src/color.rs b/gpui/src/color.rs index 9e31530b27f8b..5adf03daef73e 100644 --- a/gpui/src/color.rs +++ b/gpui/src/color.rs @@ -33,6 +33,14 @@ impl Color { Self(ColorU::from_u32(0xff0000ff)) } + pub fn green() -> Self { + Self(ColorU::from_u32(0x00ff00ff)) + } + + pub fn blue() -> Self { + Self(ColorU::from_u32(0x0000ffff)) + } + pub fn new(r: u8, g: u8, b: u8, a: u8) -> Self { Self(ColorU::new(r, g, b, a)) } diff --git a/gpui/src/elements.rs b/gpui/src/elements.rs index 42e9810cfbff2..08f8732e0e594 100644 --- a/gpui/src/elements.rs +++ b/gpui/src/elements.rs @@ -8,7 +8,6 @@ mod flex; mod hook; mod image; mod label; -mod line_box; mod list; mod mouse_event_handler; mod overlay; @@ -19,8 +18,8 @@ mod uniform_list; pub use self::{ align::*, canvas::*, constrained_box::*, container::*, empty::*, event_handler::*, flex::*, - hook::*, image::*, label::*, line_box::*, list::*, mouse_event_handler::*, overlay::*, - stack::*, svg::*, 
text::*, uniform_list::*, + hook::*, image::*, label::*, list::*, mouse_event_handler::*, overlay::*, stack::*, svg::*, + text::*, uniform_list::*, }; pub use crate::presenter::ChildView; use crate::{ diff --git a/gpui/src/elements/label.rs b/gpui/src/elements/label.rs index acfbb5abd9e7d..c1e048eb93e0c 100644 --- a/gpui/src/elements/label.rs +++ b/gpui/src/elements/label.rs @@ -137,8 +137,7 @@ impl Element for Label { let size = vec2f( line.width().max(constraint.min.x()).min(constraint.max.x()), cx.font_cache - .line_height(self.style.text.font_id, self.style.text.font_size) - .ceil(), + .line_height(self.style.text.font_id, self.style.text.font_size), ); (size, line) diff --git a/gpui/src/elements/line_box.rs b/gpui/src/elements/line_box.rs deleted file mode 100644 index 33fd2510c8869..0000000000000 --- a/gpui/src/elements/line_box.rs +++ /dev/null @@ -1,87 +0,0 @@ -use crate::{ - fonts::TextStyle, - geometry::{ - rect::RectF, - vector::{vec2f, Vector2F}, - }, - json::{json, ToJson}, - DebugContext, Element, ElementBox, Event, EventContext, LayoutContext, PaintContext, - SizeConstraint, -}; - -pub struct LineBox { - child: ElementBox, - style: TextStyle, -} - -impl LineBox { - pub fn new(child: ElementBox, style: TextStyle) -> Self { - Self { child, style } - } -} - -impl Element for LineBox { - type LayoutState = f32; - type PaintState = (); - - fn layout( - &mut self, - constraint: SizeConstraint, - cx: &mut LayoutContext, - ) -> (Vector2F, Self::LayoutState) { - let line_height = cx - .font_cache - .line_height(self.style.font_id, self.style.font_size); - let character_height = cx - .font_cache - .ascent(self.style.font_id, self.style.font_size) - + cx.font_cache - .descent(self.style.font_id, self.style.font_size); - let child_max = vec2f(constraint.max.x(), character_height); - let child_size = self.child.layout( - SizeConstraint::new(constraint.min.min(child_max), child_max), - cx, - ); - let size = vec2f(child_size.x(), line_height); - (size, (line_height 
- character_height) / 2.) - } - - fn paint( - &mut self, - bounds: RectF, - visible_bounds: RectF, - padding_top: &mut f32, - cx: &mut PaintContext, - ) -> Self::PaintState { - self.child.paint( - bounds.origin() + vec2f(0., *padding_top), - visible_bounds, - cx, - ); - } - - fn dispatch_event( - &mut self, - event: &Event, - _: RectF, - _: &mut Self::LayoutState, - _: &mut Self::PaintState, - cx: &mut EventContext, - ) -> bool { - self.child.dispatch_event(event, cx) - } - - fn debug( - &self, - bounds: RectF, - _: &Self::LayoutState, - _: &Self::PaintState, - cx: &DebugContext, - ) -> serde_json::Value { - json!({ - "bounds": bounds.to_json(), - "style": self.style.to_json(), - "child": self.child.debug(cx), - }) - } -} diff --git a/gpui/src/font_cache.rs b/gpui/src/font_cache.rs index c0255a7af5f25..0509ecd437d13 100644 --- a/gpui/src/font_cache.rs +++ b/gpui/src/font_cache.rs @@ -166,6 +166,10 @@ impl FontCache { self.metric(font_id, |m| m.cap_height) * self.em_scale(font_id, font_size) } + pub fn x_height(&self, font_id: FontId, font_size: f32) -> f32 { + self.metric(font_id, |m| m.x_height) * self.em_scale(font_id, font_size) + } + pub fn ascent(&self, font_id: FontId, font_size: f32) -> f32 { self.metric(font_id, |m| m.ascent) * self.em_scale(font_id, font_size) } @@ -178,6 +182,14 @@ impl FontCache { font_size / self.metric(font_id, |m| m.units_per_em as f32) } + pub fn baseline_offset(&self, font_id: FontId, font_size: f32) -> f32 { + let line_height = self.line_height(font_id, font_size); + let ascent = self.ascent(font_id, font_size); + let descent = self.descent(font_id, font_size); + let padding_top = (line_height - ascent - descent) / 2.; + padding_top + ascent + } + pub fn line_wrapper(self: &Arc, font_id: FontId, font_size: f32) -> LineWrapperHandle { let mut state = self.0.write(); let wrappers = state diff --git a/gpui/src/fonts.rs b/gpui/src/fonts.rs index 3ec8aad9626bf..f2bbd04477bdf 100644 --- a/gpui/src/fonts.rs +++ b/gpui/src/fonts.rs @@ 
-132,6 +132,14 @@ impl TextStyle { font_cache.line_height(self.font_id, self.font_size) } + pub fn cap_height(&self, font_cache: &FontCache) -> f32 { + font_cache.cap_height(self.font_id, self.font_size) + } + + pub fn x_height(&self, font_cache: &FontCache) -> f32 { + font_cache.x_height(self.font_id, self.font_size) + } + pub fn em_width(&self, font_cache: &FontCache) -> f32 { font_cache.em_width(self.font_id, self.font_size) } @@ -140,6 +148,10 @@ impl TextStyle { font_cache.metric(self.font_id, |m| m.descent) * self.em_scale(font_cache) } + pub fn baseline_offset(&self, font_cache: &FontCache) -> f32 { + font_cache.baseline_offset(self.font_id, self.font_size) + } + fn em_scale(&self, font_cache: &FontCache) -> f32 { font_cache.em_scale(self.font_id, self.font_size) } diff --git a/zed/assets/themes/_base.toml b/zed/assets/themes/_base.toml index 3c2181ae071bd..eda656b267ec0 100644 --- a/zed/assets/themes/_base.toml +++ b/zed/assets/themes/_base.toml @@ -56,10 +56,13 @@ border = { width = 1, color = "$border.0", right = true } extends = "$workspace.sidebar" border = { width = 1, color = "$border.0", left = true } +[panel] +padding = 12 + [chat_panel] +extends = "$panel" channel_name = { extends = "$text.0", weight = "bold" } channel_name_hash = { text = "$text.2", padding.right = 8 } -padding = 12 [chat_panel.message] body = "$text.1" @@ -121,12 +124,18 @@ extends = "$chat_panel.sign_in_prompt" color = "$text.1.color" [people_panel] -host_username = "$text.0" +extends = "$panel" +host_username = { extends = "$text.0", padding.left = 5 } worktree_host_avatar = { corner_radius = 10 } worktree_guest_avatar = { corner_radius = 8 } [people_panel.worktree_name] extends = "$text.0" +padding = { left = 5 } + +[people_panel.tree_branch] +width = 1 +color = "$surface.2" [selector] background = "$surface.0" diff --git a/zed/src/people_panel.rs b/zed/src/people_panel.rs index fc459825de043..771fd6713fb94 100644 --- a/zed/src/people_panel.rs +++ b/zed/src/people_panel.rs @@ 
-4,7 +4,9 @@ use crate::{ Settings, }; use gpui::{ - elements::*, Element, ElementBox, Entity, ModelHandle, RenderContext, Subscription, View, + elements::*, + geometry::{rect::RectF, vector::vec2f}, + Element, ElementBox, Entity, FontCache, ModelHandle, RenderContext, Subscription, View, ViewContext, }; use postage::watch; @@ -12,6 +14,7 @@ use postage::watch; pub struct PeoplePanel { collaborators: ListState, user_store: ModelHandle, + settings: watch::Receiver, _maintain_collaborators: Subscription, } @@ -28,15 +31,19 @@ impl PeoplePanel { 1000., { let user_store = user_store.clone(); + let settings = settings.clone(); move |ix, cx| { - let user_store = user_store.read(cx); - let settings = settings.borrow(); - Self::render_collaborator(&user_store.collaborators()[ix], &settings.theme) + Self::render_collaborator( + &user_store.read(cx).collaborators()[ix], + &settings.borrow().theme, + cx.font_cache(), + ) } }, ), _maintain_collaborators: cx.observe(&user_store, Self::update_collaborators), user_store, + settings, } } @@ -46,54 +53,117 @@ impl PeoplePanel { cx.notify(); } - fn render_collaborator(collaborator: &Collaborator, theme: &Theme) -> ElementBox { + fn render_collaborator( + collaborator: &Collaborator, + theme: &Theme, + font_cache: &FontCache, + ) -> ElementBox { + let theme = &theme.people_panel; + let worktree_count = collaborator.worktrees.len(); + let line_height = theme.worktree_name.text.line_height(font_cache); + let cap_height = theme.worktree_name.text.cap_height(font_cache); + let baseline_offset = theme.worktree_name.text.baseline_offset(font_cache); + let tree_branch = theme.tree_branch; + Flex::column() .with_child( Flex::row() .with_children(collaborator.user.avatar.clone().map(|avatar| { ConstrainedBox::new( Image::new(avatar) - .with_style(theme.people_panel.worktree_host_avatar) + .with_style(theme.worktree_host_avatar) .boxed(), ) .with_width(20.) 
.boxed() })) - .with_child( - Label::new( - collaborator.user.github_login.clone(), - theme.people_panel.host_username.clone(), - ) - .boxed(), - ) - .boxed(), - ) - .with_children(collaborator.worktrees.iter().map(|worktree| { - Flex::row() .with_child( Container::new( Label::new( - worktree.root_name.clone(), - theme.people_panel.worktree_name.text.clone(), + collaborator.user.github_login.clone(), + theme.host_username.text.clone(), ) .boxed(), ) - .with_style(theme.people_panel.worktree_name.container) + .with_style(theme.host_username.container) .boxed(), ) - .with_children(worktree.participants.iter().filter_map(|participant| { - participant.avatar.clone().map(|avatar| { - ConstrainedBox::new( - Image::new(avatar) - .with_style(theme.people_panel.worktree_guest_avatar) + .boxed(), + ) + .with_children( + collaborator + .worktrees + .iter() + .enumerate() + .map(|(ix, worktree)| { + Flex::row() + .with_child( + ConstrainedBox::new( + Canvas::new(move |bounds, _, cx| { + let start_x = bounds.min_x() + (bounds.width() / 2.) + - (tree_branch.width / 2.); + let end_x = bounds.max_x(); + let start_y = bounds.min_y(); + let end_y = + bounds.min_y() + baseline_offset - (cap_height / 2.); + + cx.scene.push_quad(gpui::Quad { + bounds: RectF::from_points( + vec2f(start_x, start_y), + vec2f( + start_x + tree_branch.width, + if ix + 1 == worktree_count { + end_y + } else { + bounds.max_y() + }, + ), + ), + background: Some(tree_branch.color), + border: gpui::Border::default(), + corner_radius: 0., + }); + cx.scene.push_quad(gpui::Quad { + bounds: RectF::from_points( + vec2f(start_x, end_y), + vec2f(end_x, end_y + tree_branch.width), + ), + background: Some(tree_branch.color), + border: gpui::Border::default(), + corner_radius: 0., + }); + }) .boxed(), + ) + .with_width(20.) + .with_height(line_height) + .boxed(), ) - .with_width(16.) 
+ .with_child( + Container::new( + Label::new( + worktree.root_name.clone(), + theme.worktree_name.text.clone(), + ) + .boxed(), + ) + .with_style(theme.worktree_name.container) + .boxed(), + ) + .with_children(worktree.participants.iter().filter_map(|participant| { + participant.avatar.clone().map(|avatar| { + ConstrainedBox::new( + Image::new(avatar) + .with_style(theme.worktree_guest_avatar) + .boxed(), + ) + .with_width(16.) + .boxed() + }) + })) .boxed() - }) - })) - .boxed() - })) + }), + ) .boxed() } } @@ -110,6 +180,9 @@ impl View for PeoplePanel { } fn render(&mut self, _: &mut RenderContext) -> ElementBox { - List::new(self.collaborators.clone()).boxed() + let theme = &self.settings.borrow().theme.people_panel; + Container::new(List::new(self.collaborators.clone()).boxed()) + .with_style(theme.container) + .boxed() } } diff --git a/zed/src/theme.rs b/zed/src/theme.rs index 59a1ec96343e4..b7a4f41023dda 100644 --- a/zed/src/theme.rs +++ b/zed/src/theme.rs @@ -109,10 +109,17 @@ pub struct ChatPanel { pub struct PeoplePanel { #[serde(flatten)] pub container: ContainerStyle, - pub host_username: TextStyle, + pub host_username: ContainedText, pub worktree_name: ContainedText, pub worktree_host_avatar: ImageStyle, pub worktree_guest_avatar: ImageStyle, + pub tree_branch: TreeBranch, +} + +#[derive(Copy, Clone, Deserialize)] +pub struct TreeBranch { + pub width: f32, + pub color: Color, } #[derive(Deserialize)] From 1bd6cd09783b52cfd328f029fb8c2913b2e9e7bb Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 21 Sep 2021 18:27:26 +0200 Subject: [PATCH 27/43] Allow size to be specified in `ImageStyle` Co-Authored-By: Nathan Sobo --- gpui/src/elements/image.rs | 19 ++++++++++++++++--- gpui/src/presenter.rs | 7 +++++++ zed/assets/themes/_base.toml | 4 ++-- zed/src/people_panel.rs | 20 ++++++-------------- 4 files changed, 31 insertions(+), 19 deletions(-) diff --git a/gpui/src/elements/image.rs b/gpui/src/elements/image.rs index 421e18ec95ce7..57644986dc005 
100644 --- a/gpui/src/elements/image.rs +++ b/gpui/src/elements/image.rs @@ -1,6 +1,9 @@ use super::constrain_size_preserving_aspect_ratio; use crate::{ - geometry::{rect::RectF, vector::Vector2F}, + geometry::{ + rect::RectF, + vector::{vec2f, Vector2F}, + }, json::{json, ToJson}, scene, Border, DebugContext, Element, Event, EventContext, ImageData, LayoutContext, PaintContext, SizeConstraint, @@ -19,6 +22,10 @@ pub struct ImageStyle { border: Border, #[serde(default)] corner_radius: f32, + #[serde(default)] + height: Option, + #[serde(default)] + width: Option, } impl Image { @@ -44,8 +51,14 @@ impl Element for Image { constraint: SizeConstraint, _: &mut LayoutContext, ) -> (Vector2F, Self::LayoutState) { - let size = - constrain_size_preserving_aspect_ratio(constraint.max, self.data.size().to_f32()); + let desired_size = vec2f( + self.style.width.unwrap_or(constraint.max.x()), + self.style.height.unwrap_or(constraint.max.y()), + ); + let size = constrain_size_preserving_aspect_ratio( + constraint.constrain(desired_size), + self.data.size().to_f32(), + ); (size, ()) } diff --git a/gpui/src/presenter.rs b/gpui/src/presenter.rs index 2062397e9e654..d6765bec29876 100644 --- a/gpui/src/presenter.rs +++ b/gpui/src/presenter.rs @@ -432,6 +432,13 @@ impl SizeConstraint { Axis::Vertical => self.min.y(), } } + + pub fn constrain(&self, size: Vector2F) -> Vector2F { + vec2f( + size.x().min(self.max.x()).max(self.min.x()), + size.y().min(self.max.y()).max(self.min.y()), + ) + } } impl ToJson for SizeConstraint { diff --git a/zed/assets/themes/_base.toml b/zed/assets/themes/_base.toml index eda656b267ec0..7b1fd8b9ffd75 100644 --- a/zed/assets/themes/_base.toml +++ b/zed/assets/themes/_base.toml @@ -126,8 +126,8 @@ color = "$text.1.color" [people_panel] extends = "$panel" host_username = { extends = "$text.0", padding.left = 5 } -worktree_host_avatar = { corner_radius = 10 } -worktree_guest_avatar = { corner_radius = 8 } +worktree_host_avatar = { corner_radius = 10, width = 
20 } +worktree_guest_avatar = { corner_radius = 8, width = 16 } [people_panel.worktree_name] extends = "$text.0" diff --git a/zed/src/people_panel.rs b/zed/src/people_panel.rs index 771fd6713fb94..769b2d7d9b80d 100644 --- a/zed/src/people_panel.rs +++ b/zed/src/people_panel.rs @@ -69,13 +69,9 @@ impl PeoplePanel { .with_child( Flex::row() .with_children(collaborator.user.avatar.clone().map(|avatar| { - ConstrainedBox::new( - Image::new(avatar) - .with_style(theme.worktree_host_avatar) - .boxed(), - ) - .with_width(20.) - .boxed() + Image::new(avatar) + .with_style(theme.worktree_host_avatar) + .boxed() })) .with_child( Container::new( @@ -152,13 +148,9 @@ impl PeoplePanel { ) .with_children(worktree.participants.iter().filter_map(|participant| { participant.avatar.clone().map(|avatar| { - ConstrainedBox::new( - Image::new(avatar) - .with_style(theme.worktree_guest_avatar) - .boxed(), - ) - .with_width(16.) - .boxed() + Image::new(avatar) + .with_style(theme.worktree_guest_avatar) + .boxed() }) })) .boxed() From c90dc7235eb0c6f00686af0275ad2cb2a6dc9d81 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 21 Sep 2021 18:30:49 +0200 Subject: [PATCH 28/43] Rename `participants` to `guests` in proto Co-Authored-By: Nathan Sobo --- server/src/rpc.rs | 5 +---- server/src/rpc/store.rs | 2 +- zed/src/people_panel.rs | 2 +- zed/src/user.rs | 12 ++++++------ zrpc/proto/zed.proto | 2 +- 5 files changed, 10 insertions(+), 13 deletions(-) diff --git a/server/src/rpc.rs b/server/src/rpc.rs index f957cbffa9161..51df8fea7a32c 100644 --- a/server/src/rpc.rs +++ b/server/src/rpc.rs @@ -1947,10 +1947,7 @@ mod tests { .map(|w| { ( w.root_name.as_str(), - w.participants - .iter() - .map(|p| p.github_login.as_str()) - .collect(), + w.guests.iter().map(|p| p.github_login.as_str()).collect(), ) }) .collect(); diff --git a/server/src/rpc/store.rs b/server/src/rpc/store.rs index 15d2e6a920bc8..f57e7ed59b891 100644 --- a/server/src/rpc/store.rs +++ b/server/src/rpc/store.rs @@ -204,7 
+204,7 @@ impl Store { host.worktrees.push(proto::WorktreeMetadata { root_name: worktree.root_name.clone(), is_shared: worktree.share().is_ok(), - participants: guests.into_iter().collect(), + guests: guests.into_iter().collect(), }); } } diff --git a/zed/src/people_panel.rs b/zed/src/people_panel.rs index 769b2d7d9b80d..26dbc19f74c00 100644 --- a/zed/src/people_panel.rs +++ b/zed/src/people_panel.rs @@ -146,7 +146,7 @@ impl PeoplePanel { .with_style(theme.worktree_name.container) .boxed(), ) - .with_children(worktree.participants.iter().filter_map(|participant| { + .with_children(worktree.guests.iter().filter_map(|participant| { participant.avatar.clone().map(|avatar| { Image::new(avatar) .with_style(theme.worktree_guest_avatar) diff --git a/zed/src/user.rs b/zed/src/user.rs index 1ed7a1662a4de..32c271257fc49 100644 --- a/zed/src/user.rs +++ b/zed/src/user.rs @@ -30,7 +30,7 @@ pub struct Collaborator { pub struct WorktreeMetadata { pub root_name: String, pub is_shared: bool, - pub participants: Vec>, + pub guests: Vec>, } pub struct UserStore { @@ -112,7 +112,7 @@ impl UserStore { collaborator .worktrees .iter() - .flat_map(|w| &w.participants) + .flat_map(|w| &w.guests) .copied(), ); } @@ -224,9 +224,9 @@ impl Collaborator { .await?; let mut worktrees = Vec::new(); for worktree in collaborator.worktrees { - let mut participants = Vec::new(); - for participant_id in worktree.participants { - participants.push( + let mut guests = Vec::new(); + for participant_id in worktree.guests { + guests.push( user_store .update(cx, |user_store, cx| { user_store.fetch_user(participant_id, cx) @@ -237,7 +237,7 @@ impl Collaborator { worktrees.push(WorktreeMetadata { root_name: worktree.root_name, is_shared: worktree.is_shared, - participants, + guests, }); } Ok(Self { user, worktrees }) diff --git a/zrpc/proto/zed.proto b/zrpc/proto/zed.proto index 340c0751aa325..be85fdecd6ba4 100644 --- a/zrpc/proto/zed.proto +++ b/zrpc/proto/zed.proto @@ -345,5 +345,5 @@ message Collaborator { 
message WorktreeMetadata { string root_name = 1; bool is_shared = 2; - repeated uint64 participants = 3; + repeated uint64 guests = 3; } From 65a3af9bde66c5e8018ae7982cf2cffe47eb580a Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 21 Sep 2021 12:20:32 -0700 Subject: [PATCH 29/43] Rerender list elements when they notify during events Co-Authored-By: Nathan Sobo --- gpui/src/elements/list.rs | 35 ++++++++++++++++++++++++++++++++--- gpui/src/presenter.rs | 7 +++++++ 2 files changed, 39 insertions(+), 3 deletions(-) diff --git a/gpui/src/elements/list.rs b/gpui/src/elements/list.rs index 3864bf3c80daf..9fecfd6e61ed0 100644 --- a/gpui/src/elements/list.rs +++ b/gpui/src/elements/list.rs @@ -12,6 +12,7 @@ use std::{cell::RefCell, collections::VecDeque, ops::Range, rc::Rc}; pub struct List { state: ListState, + invalidated_elements: Vec, } #[derive(Clone)] @@ -79,7 +80,10 @@ struct Height(f32); impl List { pub fn new(state: ListState) -> Self { - Self { state } + Self { + state, + invalidated_elements: Default::default(), + } } } @@ -258,10 +262,35 @@ impl Element for List { let mut handled = false; let mut state = self.state.0.borrow_mut(); - for (mut element, _) in state.visible_elements(bounds, scroll_top) { - handled = element.dispatch_event(event, cx) || handled; + let mut item_origin = bounds.origin() - vec2f(0., scroll_top.offset_in_item); + let mut cursor = state.items.cursor::(); + let mut new_items = cursor.slice(&Count(scroll_top.item_ix), Bias::Right, &()); + while let Some(item) = cursor.item() { + if item_origin.y() > bounds.max_y() { + break; + } + + if let ListItem::Rendered(element) = item { + let prev_notify_count = cx.notify_count(); + let mut element = element.clone(); + handled = element.dispatch_event(event, cx) || handled; + item_origin.set_y(item_origin.y() + element.size().y()); + if cx.notify_count() > prev_notify_count { + new_items.push(ListItem::Unrendered, &()); + self.invalidated_elements.push(element); + } else { + 
new_items.push(item.clone(), &()); + } + cursor.next(&()); + } else { + unreachable!(); + } } + new_items.push_tree(cursor.suffix(&()), &()); + drop(cursor); + state.items = new_items; + match event { Event::ScrollWheel { position, diff --git a/gpui/src/presenter.rs b/gpui/src/presenter.rs index d6765bec29876..354f0a0f821af 100644 --- a/gpui/src/presenter.rs +++ b/gpui/src/presenter.rs @@ -195,6 +195,7 @@ impl Presenter { text_layout_cache: &self.text_layout_cache, view_stack: Default::default(), invalidated_views: Default::default(), + notify_count: 0, app: cx, } } @@ -300,6 +301,7 @@ pub struct EventContext<'a> { pub font_cache: &'a FontCache, pub text_layout_cache: &'a TextLayoutCache, pub app: &'a mut MutableAppContext, + pub notify_count: usize, view_stack: Vec, invalidated_views: HashSet, } @@ -325,10 +327,15 @@ impl<'a> EventContext<'a> { } pub fn notify(&mut self) { + self.notify_count += 1; if let Some(view_id) = self.view_stack.last() { self.invalidated_views.insert(*view_id); } } + + pub fn notify_count(&self) -> usize { + self.notify_count + } } impl<'a> Deref for EventContext<'a> { From 412535420bdaf2fd3af686c118c2c7f7e589401d Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 21 Sep 2021 12:21:03 -0700 Subject: [PATCH 30/43] Allow joining worktrees by clicking them in the people panel Co-Authored-By: Antonio Scandurra Co-Authored-By: Nathan Sobo --- server/src/rpc/store.rs | 1 + zed/assets/themes/_base.toml | 14 +++++--- zed/src/menus.rs | 5 --- zed/src/people_panel.rs | 67 ++++++++++++++++++++++-------------- zed/src/theme.rs | 9 ++--- zed/src/user.rs | 10 +++--- zed/src/workspace.rs | 10 +++--- zrpc/proto/zed.proto | 7 ++-- 8 files changed, 74 insertions(+), 49 deletions(-) diff --git a/server/src/rpc/store.rs b/server/src/rpc/store.rs index f57e7ed59b891..80cd8e2b3be1e 100644 --- a/server/src/rpc/store.rs +++ b/server/src/rpc/store.rs @@ -202,6 +202,7 @@ impl Store { worktrees: Vec::new(), }); host.worktrees.push(proto::WorktreeMetadata { + 
id: *worktree_id, root_name: worktree.root_name.clone(), is_shared: worktree.share().is_ok(), guests: guests.into_iter().collect(), diff --git a/zed/assets/themes/_base.toml b/zed/assets/themes/_base.toml index 7b1fd8b9ffd75..58d0965defec6 100644 --- a/zed/assets/themes/_base.toml +++ b/zed/assets/themes/_base.toml @@ -125,14 +125,20 @@ color = "$text.1.color" [people_panel] extends = "$panel" +host_avatar = { corner_radius = 10, width = 20 } host_username = { extends = "$text.0", padding.left = 5 } -worktree_host_avatar = { corner_radius = 10, width = 20 } -worktree_guest_avatar = { corner_radius = 8, width = 16 } +guest_avatar = { corner_radius = 8, width = 16 } -[people_panel.worktree_name] -extends = "$text.0" +[people_panel.worktree] +extends = "$text.1" padding = { left = 5 } +[people_panel.hovered_worktree] +extends = "$text.0" +padding = { left = 8 } +# extends = "$people_panel.worktree" +# color = "$text.0.color" + [people_panel.tree_branch] width = 1 color = "$surface.2" diff --git a/zed/src/menus.rs b/zed/src/menus.rs index 9ba28f9cef875..1fe4bd922ac09 100644 --- a/zed/src/menus.rs +++ b/zed/src/menus.rs @@ -26,11 +26,6 @@ pub fn menus(state: &Arc) -> Vec> { keystroke: None, action: Box::new(workspace::ShareWorktree), }, - MenuItem::Action { - name: "Join", - keystroke: None, - action: Box::new(workspace::JoinWorktree(state.clone())), - }, MenuItem::Action { name: "Quit", keystroke: Some("cmd-q"), diff --git a/zed/src/people_panel.rs b/zed/src/people_panel.rs index 26dbc19f74c00..24cd7c96bbcfc 100644 --- a/zed/src/people_panel.rs +++ b/zed/src/people_panel.rs @@ -4,13 +4,17 @@ use crate::{ Settings, }; use gpui::{ + action, elements::*, geometry::{rect::RectF, vector::vec2f}, - Element, ElementBox, Entity, FontCache, ModelHandle, RenderContext, Subscription, View, + platform::CursorStyle, + Element, ElementBox, Entity, LayoutContext, ModelHandle, RenderContext, Subscription, View, ViewContext, }; use postage::watch; +action!(JoinWorktree, u64); + pub 
struct PeoplePanel { collaborators: ListState, user_store: ModelHandle, @@ -33,11 +37,8 @@ impl PeoplePanel { let user_store = user_store.clone(); let settings = settings.clone(); move |ix, cx| { - Self::render_collaborator( - &user_store.read(cx).collaborators()[ix], - &settings.borrow().theme, - cx.font_cache(), - ) + let collaborators = user_store.read(cx).collaborators().clone(); + Self::render_collaborator(&collaborators[ix], &settings.borrow().theme, cx) } }, ), @@ -56,23 +57,26 @@ impl PeoplePanel { fn render_collaborator( collaborator: &Collaborator, theme: &Theme, - font_cache: &FontCache, + cx: &mut LayoutContext, ) -> ElementBox { let theme = &theme.people_panel; let worktree_count = collaborator.worktrees.len(); - let line_height = theme.worktree_name.text.line_height(font_cache); - let cap_height = theme.worktree_name.text.cap_height(font_cache); - let baseline_offset = theme.worktree_name.text.baseline_offset(font_cache); + let font_cache = cx.font_cache(); + let line_height = theme.worktree.text.line_height(font_cache); + let cap_height = theme.worktree.text.cap_height(font_cache); + let baseline_offset = theme.worktree.text.baseline_offset(font_cache); let tree_branch = theme.tree_branch; Flex::column() .with_child( Flex::row() - .with_children(collaborator.user.avatar.clone().map(|avatar| { - Image::new(avatar) - .with_style(theme.worktree_host_avatar) - .boxed() - })) + .with_children( + collaborator + .user + .avatar + .clone() + .map(|avatar| Image::new(avatar).with_style(theme.host_avatar).boxed()), + ) .with_child( Container::new( Label::new( @@ -92,6 +96,7 @@ impl PeoplePanel { .iter() .enumerate() .map(|(ix, worktree)| { + let worktree_id = worktree.id; Flex::row() .with_child( ConstrainedBox::new( @@ -136,21 +141,33 @@ impl PeoplePanel { .boxed(), ) .with_child( - Container::new( - Label::new( - worktree.root_name.clone(), - theme.worktree_name.text.clone(), - ) - .boxed(), + MouseEventHandler::new::( + worktree_id as usize, + cx, + 
|mouse_state, _| { + let style = if mouse_state.hovered { + &theme.hovered_worktree + } else { + &theme.worktree + }; + Container::new( + Label::new( + worktree.root_name.clone(), + style.text.clone(), + ) + .boxed(), + ) + .with_style(style.container) + .boxed() + }, ) - .with_style(theme.worktree_name.container) + .with_cursor_style(CursorStyle::PointingHand) + .on_click(move |cx| cx.dispatch_action(JoinWorktree(worktree_id))) .boxed(), ) .with_children(worktree.guests.iter().filter_map(|participant| { participant.avatar.clone().map(|avatar| { - Image::new(avatar) - .with_style(theme.worktree_guest_avatar) - .boxed() + Image::new(avatar).with_style(theme.guest_avatar).boxed() }) })) .boxed() diff --git a/zed/src/theme.rs b/zed/src/theme.rs index b7a4f41023dda..3dbf9d0601d32 100644 --- a/zed/src/theme.rs +++ b/zed/src/theme.rs @@ -109,10 +109,11 @@ pub struct ChatPanel { pub struct PeoplePanel { #[serde(flatten)] pub container: ContainerStyle, + pub host_avatar: ImageStyle, pub host_username: ContainedText, - pub worktree_name: ContainedText, - pub worktree_host_avatar: ImageStyle, - pub worktree_guest_avatar: ImageStyle, + pub worktree: ContainedText, + pub hovered_worktree: ContainedText, + pub guest_avatar: ImageStyle, pub tree_branch: TreeBranch, } @@ -161,7 +162,7 @@ pub struct Selector { pub active_item: ContainedLabel, } -#[derive(Deserialize)] +#[derive(Debug, Deserialize)] pub struct ContainedText { #[serde(flatten)] pub container: ContainerStyle, diff --git a/zed/src/user.rs b/zed/src/user.rs index 32c271257fc49..4c59f64b83503 100644 --- a/zed/src/user.rs +++ b/zed/src/user.rs @@ -28,6 +28,7 @@ pub struct Collaborator { #[derive(Debug)] pub struct WorktreeMetadata { + pub id: u64, pub root_name: String, pub is_shared: bool, pub guests: Vec>, @@ -36,7 +37,7 @@ pub struct WorktreeMetadata { pub struct UserStore { users: HashMap>, current_user: watch::Receiver>>, - collaborators: Vec, + collaborators: Arc<[Collaborator]>, rpc: Arc, http: Arc, 
_maintain_collaborators: Task<()>, @@ -64,7 +65,7 @@ impl UserStore { Self { users: Default::default(), current_user: current_user_rx, - collaborators: Default::default(), + collaborators: Arc::from([]), rpc: rpc.clone(), http, _maintain_collaborators: cx.spawn_weak(|this, mut cx| async move { @@ -127,7 +128,7 @@ impl UserStore { } this.update(&mut cx, |this, cx| { - this.collaborators = collaborators; + this.collaborators = collaborators.into(); cx.notify(); }); @@ -135,7 +136,7 @@ impl UserStore { }) } - pub fn collaborators(&self) -> &[Collaborator] { + pub fn collaborators(&self) -> &Arc<[Collaborator]> { &self.collaborators } @@ -235,6 +236,7 @@ impl Collaborator { ); } worktrees.push(WorktreeMetadata { + id: worktree.id, root_name: worktree.root_name, is_shared: worktree.is_shared, guests, diff --git a/zed/src/workspace.rs b/zed/src/workspace.rs index f3db51077dd42..89a48a99fdf92 100644 --- a/zed/src/workspace.rs +++ b/zed/src/workspace.rs @@ -7,7 +7,7 @@ use crate::{ editor::Buffer, fs::Fs, language::LanguageRegistry, - people_panel::PeoplePanel, + people_panel::{JoinWorktree, PeoplePanel}, project_browser::ProjectBrowser, rpc, settings::Settings, @@ -44,7 +44,6 @@ action!(Open, Arc); action!(OpenPaths, OpenParams); action!(OpenNew, Arc); action!(ShareWorktree); -action!(JoinWorktree, Arc); action!(Save); action!(DebugElements); @@ -59,6 +58,7 @@ pub fn init(cx: &mut MutableAppContext) { cx.add_action(Workspace::open_new_file); cx.add_action(Workspace::share_worktree); cx.add_action(Workspace::toggle_sidebar_item); + cx.add_action(Workspace::join_worktree); cx.add_bindings(vec![ Binding::new("cmd-s", Save, None), Binding::new("cmd-alt-i", DebugElements, None), @@ -839,14 +839,16 @@ impl Workspace { .detach(); } - fn join_worktree(&mut self, id: u64, cx: &mut ViewContext) { + fn join_worktree(&mut self, action: &JoinWorktree, cx: &mut ViewContext) { let rpc = self.rpc.clone(); let languages = self.languages.clone(); + let worktree_id = action.0; 
cx.spawn(|this, mut cx| { async move { rpc.authenticate_and_connect(&cx).await?; - let worktree = Worktree::open_remote(rpc.clone(), id, languages, &mut cx).await?; + let worktree = + Worktree::open_remote(rpc.clone(), worktree_id, languages, &mut cx).await?; this.update(&mut cx, |workspace, cx| { cx.observe(&worktree, |_, _, cx| cx.notify()).detach(); workspace.worktrees.insert(worktree); diff --git a/zrpc/proto/zed.proto b/zrpc/proto/zed.proto index be85fdecd6ba4..0f0ea69261c07 100644 --- a/zrpc/proto/zed.proto +++ b/zrpc/proto/zed.proto @@ -343,7 +343,8 @@ message Collaborator { } message WorktreeMetadata { - string root_name = 1; - bool is_shared = 2; - repeated uint64 guests = 3; + uint64 id = 1; + string root_name = 2; + bool is_shared = 3; + repeated uint64 guests = 4; } From 9352c4e0760c71e8613c32ec14fee399fdaab454 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 21 Sep 2021 13:29:06 -0700 Subject: [PATCH 31/43] Ignore empty ZED_IMPERSONATE env var --- zed/src/rpc.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/zed/src/rpc.rs b/zed/src/rpc.rs index 6ae83354e0e25..5dc2b49b76d9a 100644 --- a/zed/src/rpc.rs +++ b/zed/src/rpc.rs @@ -30,7 +30,9 @@ use zrpc::{ lazy_static! 
{ static ref ZED_SERVER_URL: String = std::env::var("ZED_SERVER_URL").unwrap_or("https://zed.dev:443".to_string()); - static ref IMPERSONATE_LOGIN: Option = std::env::var("ZED_IMPERSONATE").ok(); + static ref IMPERSONATE_LOGIN: Option = std::env::var("ZED_IMPERSONATE") + .ok() + .and_then(|s| if s.is_empty() { None } else { Some(s) }); } pub struct Client { From 729896d32a664737fd50eab3c8e60dd007a91c5e Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 21 Sep 2021 13:35:00 -0700 Subject: [PATCH 32/43] Style worktree rows in people panel Co-Authored-By: Nathan Sobo --- zed/assets/themes/_base.toml | 23 +++++-- zed/src/people_panel.rs | 112 ++++++++++++++++++++++++----------- zed/src/theme.rs | 8 ++- 3 files changed, 100 insertions(+), 43 deletions(-) diff --git a/zed/assets/themes/_base.toml b/zed/assets/themes/_base.toml index 58d0965defec6..c30893de44818 100644 --- a/zed/assets/themes/_base.toml +++ b/zed/assets/themes/_base.toml @@ -128,16 +128,27 @@ extends = "$panel" host_avatar = { corner_radius = 10, width = 20 } host_username = { extends = "$text.0", padding.left = 5 } guest_avatar = { corner_radius = 8, width = 16 } +guest_avatar_spacing = 8 -[people_panel.worktree] +[people_panel.unshared_worktree] extends = "$text.1" padding = { left = 5 } -[people_panel.hovered_worktree] -extends = "$text.0" -padding = { left = 8 } -# extends = "$people_panel.worktree" -# color = "$text.0.color" +[people_panel.own_worktree] +extends = "$people_panel.unshared_worktree" +color = "$syntax.variant" + +[people_panel.joined_worktree] +extends = "$people_panel.own_worktree" + +[people_panel.shared_worktree] +extends = "$people_panel.unshared_worktree" +color = "$text.0.color" + +[people_panel.hovered_shared_worktree] +extends = "$people_panel.shared_worktree" +background = "$state.hover" +corner_radius = 6 [people_panel.tree_branch] width = 1 diff --git a/zed/src/people_panel.rs b/zed/src/people_panel.rs index 24cd7c96bbcfc..74a6d2b614b1b 100644 --- 
a/zed/src/people_panel.rs +++ b/zed/src/people_panel.rs @@ -37,8 +37,15 @@ impl PeoplePanel { let user_store = user_store.clone(); let settings = settings.clone(); move |ix, cx| { - let collaborators = user_store.read(cx).collaborators().clone(); - Self::render_collaborator(&collaborators[ix], &settings.borrow().theme, cx) + let user_store = user_store.read(cx); + let collaborators = user_store.collaborators().clone(); + let current_user_id = user_store.current_user().map(|user| user.id); + Self::render_collaborator( + &collaborators[ix], + current_user_id, + &settings.borrow().theme, + cx, + ) } }, ), @@ -56,15 +63,16 @@ impl PeoplePanel { fn render_collaborator( collaborator: &Collaborator, + current_user_id: Option, theme: &Theme, cx: &mut LayoutContext, ) -> ElementBox { let theme = &theme.people_panel; let worktree_count = collaborator.worktrees.len(); let font_cache = cx.font_cache(); - let line_height = theme.worktree.text.line_height(font_cache); - let cap_height = theme.worktree.text.cap_height(font_cache); - let baseline_offset = theme.worktree.text.baseline_offset(font_cache); + let line_height = theme.unshared_worktree.text.line_height(font_cache); + let cap_height = theme.unshared_worktree.text.cap_height(font_cache); + let baseline_offset = theme.unshared_worktree.text.baseline_offset(font_cache); let tree_branch = theme.tree_branch; Flex::column() @@ -140,36 +148,70 @@ impl PeoplePanel { .with_height(line_height) .boxed(), ) - .with_child( - MouseEventHandler::new::( - worktree_id as usize, - cx, - |mouse_state, _| { - let style = if mouse_state.hovered { - &theme.hovered_worktree - } else { - &theme.worktree - }; - Container::new( - Label::new( - worktree.root_name.clone(), - style.text.clone(), - ) - .boxed(), - ) - .with_style(style.container) - .boxed() - }, - ) - .with_cursor_style(CursorStyle::PointingHand) - .on_click(move |cx| cx.dispatch_action(JoinWorktree(worktree_id))) - .boxed(), - ) - 
.with_children(worktree.guests.iter().filter_map(|participant| { - participant.avatar.clone().map(|avatar| { - Image::new(avatar).with_style(theme.guest_avatar).boxed() - }) - })) + .with_child({ + let mut worktree_row = + MouseEventHandler::new::( + worktree_id as usize, + cx, + |mouse_state, _| { + let style = + if Some(collaborator.user.id) == current_user_id { + &theme.own_worktree + } else if worktree.is_shared { + if worktree.guests.iter().any(|guest| { + Some(guest.id) == current_user_id + }) { + &theme.joined_worktree + } else if mouse_state.hovered { + &theme.hovered_shared_worktree + } else { + &theme.shared_worktree + } + } else { + &theme.unshared_worktree + }; + + Flex::row() + .with_child( + Container::new( + Label::new( + worktree.root_name.clone(), + style.text.clone(), + ) + .boxed(), + ) + .with_style(style.container) + .boxed(), + ) + .with_children(worktree.guests.iter().filter_map( + |participant| { + participant.avatar.clone().map(|avatar| { + Container::new( + Image::new(avatar) + .with_style(theme.guest_avatar) + .boxed(), + ) + .with_margin_left( + theme.guest_avatar_spacing, + ) + .boxed() + }) + }, + )) + .boxed() + }, + ); + + if worktree.is_shared { + worktree_row = worktree_row + .with_cursor_style(CursorStyle::PointingHand) + .on_click(move |cx| { + cx.dispatch_action(JoinWorktree(worktree_id)) + }); + } + + Expanded::new(1.0, worktree_row.boxed()).boxed() + }) .boxed() }), ) diff --git a/zed/src/theme.rs b/zed/src/theme.rs index 3dbf9d0601d32..112b4265ae058 100644 --- a/zed/src/theme.rs +++ b/zed/src/theme.rs @@ -111,9 +111,13 @@ pub struct PeoplePanel { pub container: ContainerStyle, pub host_avatar: ImageStyle, pub host_username: ContainedText, - pub worktree: ContainedText, - pub hovered_worktree: ContainedText, + pub own_worktree: ContainedText, + pub joined_worktree: ContainedText, + pub shared_worktree: ContainedText, + pub hovered_shared_worktree: ContainedText, + pub unshared_worktree: ContainedText, pub guest_avatar: 
ImageStyle, + pub guest_avatar_spacing: f32, pub tree_branch: TreeBranch, } From 6d0b84a467e7db06b72612a758d3a11b88279aef Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 21 Sep 2021 13:37:37 -0700 Subject: [PATCH 33/43] :lipstick: --- server/src/rpc/store.rs | 32 ++++++++++++++------------------ 1 file changed, 14 insertions(+), 18 deletions(-) diff --git a/server/src/rpc/store.rs b/server/src/rpc/store.rs index 80cd8e2b3be1e..cd4fff53ec87a 100644 --- a/server/src/rpc/store.rs +++ b/server/src/rpc/store.rs @@ -1,5 +1,4 @@ use crate::db::{ChannelId, UserId}; -use crate::errors::TideResultExt; use anyhow::anyhow; use std::collections::{hash_map, HashMap, HashSet}; use zrpc::{proto, ConnectionId}; @@ -190,23 +189,20 @@ impl Store { } } - if let Ok(host_user_id) = self - .user_id_for_connection(worktree.host_connection_id) - .context("stale worktree host connection") - { - let host = - collaborators - .entry(host_user_id) - .or_insert_with(|| proto::Collaborator { - user_id: host_user_id.to_proto(), - worktrees: Vec::new(), - }); - host.worktrees.push(proto::WorktreeMetadata { - id: *worktree_id, - root_name: worktree.root_name.clone(), - is_shared: worktree.share().is_ok(), - guests: guests.into_iter().collect(), - }); + if let Ok(host_user_id) = self.user_id_for_connection(worktree.host_connection_id) { + collaborators + .entry(host_user_id) + .or_insert_with(|| proto::Collaborator { + user_id: host_user_id.to_proto(), + worktrees: Vec::new(), + }) + .worktrees + .push(proto::WorktreeMetadata { + id: *worktree_id, + root_name: worktree.root_name.clone(), + is_shared: worktree.share.is_some(), + guests: guests.into_iter().collect(), + }); } } From b57639761043f410a6eb8ddcc9ea985e97532e8b Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 21 Sep 2021 15:09:31 -0700 Subject: [PATCH 34/43] Vertically align avatars with labels in people panel Co-Authored-By: Nathan Sobo --- gpui/src/elements/align.rs | 5 +++ gpui/src/elements/image.rs | 8 ++-- 
zed/src/people_panel.rs | 88 ++++++++++++++++++++++++-------------- 3 files changed, 66 insertions(+), 35 deletions(-) diff --git a/gpui/src/elements/align.rs b/gpui/src/elements/align.rs index 652a014ddad27..7f065e2b53578 100644 --- a/gpui/src/elements/align.rs +++ b/gpui/src/elements/align.rs @@ -25,6 +25,11 @@ impl Align { self } + pub fn left(mut self) -> Self { + self.alignment.set_x(-1.0); + self + } + pub fn right(mut self) -> Self { self.alignment.set_x(1.0); self diff --git a/gpui/src/elements/image.rs b/gpui/src/elements/image.rs index 57644986dc005..5d36828d0cd57 100644 --- a/gpui/src/elements/image.rs +++ b/gpui/src/elements/image.rs @@ -19,13 +19,13 @@ pub struct Image { #[derive(Copy, Clone, Default, Deserialize)] pub struct ImageStyle { #[serde(default)] - border: Border, + pub border: Border, #[serde(default)] - corner_radius: f32, + pub corner_radius: f32, #[serde(default)] - height: Option, + pub height: Option, #[serde(default)] - width: Option, + pub width: Option, } impl Image { diff --git a/zed/src/people_panel.rs b/zed/src/people_panel.rs index 74a6d2b614b1b..acdae8331f91c 100644 --- a/zed/src/people_panel.rs +++ b/zed/src/people_panel.rs @@ -74,6 +74,16 @@ impl PeoplePanel { let cap_height = theme.unshared_worktree.text.cap_height(font_cache); let baseline_offset = theme.unshared_worktree.text.baseline_offset(font_cache); let tree_branch = theme.tree_branch; + let host_avatar_height = theme + .host_avatar + .width + .or(theme.host_avatar.height) + .unwrap_or(0.); + let guest_avatar_height = theme + .guest_avatar + .width + .or(theme.guest_avatar.height) + .unwrap_or(0.); Flex::column() .with_child( @@ -86,15 +96,23 @@ impl PeoplePanel { .map(|avatar| Image::new(avatar).with_style(theme.host_avatar).boxed()), ) .with_child( - Container::new( - Label::new( - collaborator.user.github_login.clone(), - theme.host_username.text.clone(), + ConstrainedBox::new( + Align::new( + Container::new( + Label::new( + collaborator.user.github_login.clone(), + 
theme.host_username.text.clone(), + ) + .boxed(), + ) + .with_style(theme.host_username.container) + .boxed() ) - .boxed(), + .left() + .boxed() ) - .with_style(theme.host_username.container) - .boxed(), + .with_height(host_avatar_height) + .boxed() ) .boxed(), ) @@ -171,33 +189,41 @@ impl PeoplePanel { &theme.unshared_worktree }; - Flex::row() - .with_child( - Container::new( - Label::new( - worktree.root_name.clone(), - style.text.clone(), - ) - .boxed(), - ) - .with_style(style.container) - .boxed(), - ) - .with_children(worktree.guests.iter().filter_map( - |participant| { - participant.avatar.clone().map(|avatar| { - Container::new( - Image::new(avatar) - .with_style(theme.guest_avatar) + Container::new( + Flex::row() + .with_child( + ConstrainedBox::new( + Align::new( + Label::new( + worktree.root_name.clone(), + style.text.clone(), + ) .boxed(), + ) + .left() + .boxed() ) - .with_margin_left( - theme.guest_avatar_spacing, - ) + .with_height(guest_avatar_height) .boxed() - }) - }, - )) + ) + .with_children(worktree.guests.iter().filter_map( + |participant| { + participant.avatar.clone().map(|avatar| { + Container::new( + Image::new(avatar) + .with_style(theme.guest_avatar) + .boxed(), + ) + .with_margin_left( + theme.guest_avatar_spacing, + ) + .boxed() + }) + }, + )) + .boxed() + ) + .with_style(style.container) .boxed() }, ); From 41a1514cec5a390f7a7a40f888870b5e490e680b Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 21 Sep 2021 15:24:14 -0700 Subject: [PATCH 35/43] Add chainable Element methods for common containers Co-Authored-By: Nathan Sobo --- gpui/src/elements.rs | 28 +++++++++++++++++++++ zed/src/people_panel.rs | 56 ++++++++++++++++------------------------- 2 files changed, 50 insertions(+), 34 deletions(-) diff --git a/gpui/src/elements.rs b/gpui/src/elements.rs index 08f8732e0e594..c8048ef3fa2f0 100644 --- a/gpui/src/elements.rs +++ b/gpui/src/elements.rs @@ -108,6 +108,34 @@ pub trait Element { element: 
Rc::new(RefCell::new(Lifecycle::Init { element: self })), }) } + + fn constrained(self) -> ConstrainedBox + where + Self: 'static + Sized, + { + ConstrainedBox::new(self.boxed()) + } + + fn aligned(self) -> Align + where + Self: 'static + Sized, + { + Align::new(self.boxed()) + } + + fn contained(self) -> Container + where + Self: 'static + Sized, + { + Container::new(self.boxed()) + } + + fn expanded(self, flex: f32) -> Expanded + where + Self: 'static + Sized, + { + Expanded::new(flex, self.boxed()) + } } pub enum Lifecycle { diff --git a/zed/src/people_panel.rs b/zed/src/people_panel.rs index acdae8331f91c..6c770b703ac92 100644 --- a/zed/src/people_panel.rs +++ b/zed/src/people_panel.rs @@ -96,21 +96,15 @@ impl PeoplePanel { .map(|avatar| Image::new(avatar).with_style(theme.host_avatar).boxed()), ) .with_child( - ConstrainedBox::new( - Align::new( - Container::new( - Label::new( - collaborator.user.github_login.clone(), - theme.host_username.text.clone(), - ) - .boxed(), - ) - .with_style(theme.host_username.container) - .boxed() - ) - .left() - .boxed() + Label::new( + collaborator.user.github_login.clone(), + theme.host_username.text.clone(), ) + .contained() + .with_style(theme.host_username.container) + .aligned() + .left() + .constrained() .with_height(host_avatar_height) .boxed() ) @@ -192,32 +186,26 @@ impl PeoplePanel { Container::new( Flex::row() .with_child( - ConstrainedBox::new( - Align::new( - Label::new( - worktree.root_name.clone(), - style.text.clone(), - ) - .boxed(), - ) - .left() - .boxed() + Label::new( + worktree.root_name.clone(), + style.text.clone(), ) + .aligned() + .left() + .constrained() .with_height(guest_avatar_height) .boxed() ) .with_children(worktree.guests.iter().filter_map( |participant| { participant.avatar.clone().map(|avatar| { - Container::new( - Image::new(avatar) - .with_style(theme.guest_avatar) - .boxed(), - ) - .with_margin_left( - theme.guest_avatar_spacing, - ) - .boxed() + Image::new(avatar) + 
.with_style(theme.guest_avatar) + .contained() + .with_margin_left( + theme.guest_avatar_spacing, + ) + .boxed() }) }, )) @@ -236,7 +224,7 @@ impl PeoplePanel { }); } - Expanded::new(1.0, worktree_row.boxed()).boxed() + worktree_row.expanded(1.0).boxed() }) .boxed() }), From c24d439eb1dc1d8dc18ffae47bfaf3a0c352c49a Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 21 Sep 2021 16:27:57 -0700 Subject: [PATCH 36/43] Allow clicking on worktrees to share, unshare, join, and leave Co-Authored-By: Nathan Sobo --- zed/assets/themes/_base.toml | 15 ++-- zed/src/editor.rs | 6 ++ zed/src/editor/buffer.rs | 5 ++ zed/src/menus.rs | 10 --- zed/src/people_panel.rs | 135 +++++++++++++++++++---------------- zed/src/theme.rs | 3 +- zed/src/user.rs | 1 + zed/src/workspace.rs | 84 +++++++++++++++++++--- zed/src/worktree.rs | 35 +++++++++ 9 files changed, 200 insertions(+), 94 deletions(-) diff --git a/zed/assets/themes/_base.toml b/zed/assets/themes/_base.toml index c30893de44818..4ee79d60d4bf8 100644 --- a/zed/assets/themes/_base.toml +++ b/zed/assets/themes/_base.toml @@ -134,21 +134,18 @@ guest_avatar_spacing = 8 extends = "$text.1" padding = { left = 5 } -[people_panel.own_worktree] -extends = "$people_panel.unshared_worktree" -color = "$syntax.variant" - -[people_panel.joined_worktree] -extends = "$people_panel.own_worktree" +[people_panel.hovered_unshared_worktree] +extends = "$people_panel.shared_worktree" +background = "$state.hover" +corner_radius = 6 [people_panel.shared_worktree] extends = "$people_panel.unshared_worktree" color = "$text.0.color" [people_panel.hovered_shared_worktree] -extends = "$people_panel.shared_worktree" -background = "$state.hover" -corner_radius = 6 +extends = "$people_panel.hovered_unshared_worktree" +color = "$text.0.color" [people_panel.tree_branch] width = 1 diff --git a/zed/src/editor.rs b/zed/src/editor.rs index 25403d3aae44c..a06b7ee013787 100644 --- a/zed/src/editor.rs +++ b/zed/src/editor.rs @@ -2320,6 +2320,7 @@ impl Editor { 
buffer::Event::Saved => cx.emit(Event::Saved), buffer::Event::FileHandleChanged => cx.emit(Event::FileHandleChanged), buffer::Event::Reloaded => cx.emit(Event::FileHandleChanged), + buffer::Event::Closed => cx.emit(Event::Closed), buffer::Event::Reparsed => {} } } @@ -2449,6 +2450,7 @@ pub enum Event { Dirtied, Saved, FileHandleChanged, + Closed, } impl Entity for Editor { @@ -2556,6 +2558,10 @@ impl workspace::ItemView for Editor { matches!(event, Event::Activate) } + fn should_close_item_on_event(event: &Self::Event) -> bool { + matches!(event, Event::Closed) + } + fn should_update_tab_on_event(event: &Self::Event) -> bool { matches!( event, diff --git a/zed/src/editor/buffer.rs b/zed/src/editor/buffer.rs index 4c093032dd42c..8186b422aec89 100644 --- a/zed/src/editor/buffer.rs +++ b/zed/src/editor/buffer.rs @@ -801,6 +801,10 @@ impl Buffer { cx.emit(Event::FileHandleChanged); } + pub fn close(&mut self, cx: &mut ModelContext) { + cx.emit(Event::Closed); + } + pub fn language(&self) -> Option<&Arc> { self.language.as_ref() } @@ -2264,6 +2268,7 @@ pub enum Event { FileHandleChanged, Reloaded, Reparsed, + Closed, } impl Entity for Buffer { diff --git a/zed/src/menus.rs b/zed/src/menus.rs index 1fe4bd922ac09..e885d31209559 100644 --- a/zed/src/menus.rs +++ b/zed/src/menus.rs @@ -16,16 +16,6 @@ pub fn menus(state: &Arc) -> Vec> { action: Box::new(super::About), }, MenuItem::Separator, - MenuItem::Action { - name: "Sign In", - keystroke: None, - action: Box::new(super::Authenticate), - }, - MenuItem::Action { - name: "Share", - keystroke: None, - action: Box::new(workspace::ShareWorktree), - }, MenuItem::Action { name: "Quit", keystroke: Some("cmd-q"), diff --git a/zed/src/people_panel.rs b/zed/src/people_panel.rs index 6c770b703ac92..78a0ec153fd46 100644 --- a/zed/src/people_panel.rs +++ b/zed/src/people_panel.rs @@ -14,6 +14,9 @@ use gpui::{ use postage::watch; action!(JoinWorktree, u64); +action!(LeaveWorktree, u64); +action!(ShareWorktree, u64); 
+action!(UnshareWorktree, u64); pub struct PeoplePanel { collaborators: ListState, @@ -106,7 +109,7 @@ impl PeoplePanel { .left() .constrained() .with_height(host_avatar_height) - .boxed() + .boxed(), ) .boxed(), ) @@ -161,70 +164,78 @@ impl PeoplePanel { .boxed(), ) .with_child({ - let mut worktree_row = - MouseEventHandler::new::( - worktree_id as usize, - cx, - |mouse_state, _| { - let style = - if Some(collaborator.user.id) == current_user_id { - &theme.own_worktree - } else if worktree.is_shared { - if worktree.guests.iter().any(|guest| { - Some(guest.id) == current_user_id - }) { - &theme.joined_worktree - } else if mouse_state.hovered { - &theme.hovered_shared_worktree - } else { - &theme.shared_worktree - } - } else { - &theme.unshared_worktree - }; + let is_host = Some(collaborator.user.id) == current_user_id; + let is_guest = !is_host + && worktree + .guests + .iter() + .any(|guest| Some(guest.id) == current_user_id); + let is_shared = worktree.is_shared; - Container::new( - Flex::row() - .with_child( - Label::new( - worktree.root_name.clone(), - style.text.clone(), - ) - .aligned() - .left() - .constrained() - .with_height(guest_avatar_height) - .boxed() - ) - .with_children(worktree.guests.iter().filter_map( - |participant| { - participant.avatar.clone().map(|avatar| { - Image::new(avatar) - .with_style(theme.guest_avatar) - .contained() - .with_margin_left( - theme.guest_avatar_spacing, - ) - .boxed() - }) - }, - )) - .boxed() - ) - .with_style(style.container) - .boxed() - }, - ); + MouseEventHandler::new::( + worktree_id as usize, + cx, + |mouse_state, _| { + let style = match (worktree.is_shared, mouse_state.hovered) + { + (false, false) => &theme.unshared_worktree, + (false, true) => &theme.hovered_unshared_worktree, + (true, false) => &theme.shared_worktree, + (true, true) => &theme.hovered_shared_worktree, + }; - if worktree.is_shared { - worktree_row = worktree_row - .with_cursor_style(CursorStyle::PointingHand) - .on_click(move |cx| { + 
Container::new( + Flex::row() + .with_child( + Label::new( + worktree.root_name.clone(), + style.text.clone(), + ) + .aligned() + .left() + .constrained() + .with_height(guest_avatar_height) + .boxed(), + ) + .with_children(worktree.guests.iter().filter_map( + |participant| { + participant.avatar.clone().map(|avatar| { + Image::new(avatar) + .with_style(theme.guest_avatar) + .contained() + .with_margin_left( + theme.guest_avatar_spacing, + ) + .boxed() + }) + }, + )) + .boxed(), + ) + .with_style(style.container) + .boxed() + }, + ) + .with_cursor_style(if is_host || is_shared { + CursorStyle::PointingHand + } else { + CursorStyle::Arrow + }) + .on_click(move |cx| { + if is_shared { + if is_host { + cx.dispatch_action(UnshareWorktree(worktree_id)); + } else if is_guest { + cx.dispatch_action(LeaveWorktree(worktree_id)); + } else { cx.dispatch_action(JoinWorktree(worktree_id)) - }); - } - - worktree_row.expanded(1.0).boxed() + } + } else if is_host { + cx.dispatch_action(ShareWorktree(worktree_id)); + } + }) + .expanded(1.0) + .boxed() }) .boxed() }), diff --git a/zed/src/theme.rs b/zed/src/theme.rs index 112b4265ae058..568bb29f5aad3 100644 --- a/zed/src/theme.rs +++ b/zed/src/theme.rs @@ -111,11 +111,10 @@ pub struct PeoplePanel { pub container: ContainerStyle, pub host_avatar: ImageStyle, pub host_username: ContainedText, - pub own_worktree: ContainedText, - pub joined_worktree: ContainedText, pub shared_worktree: ContainedText, pub hovered_shared_worktree: ContainedText, pub unshared_worktree: ContainedText, + pub hovered_unshared_worktree: ContainedText, pub guest_avatar: ImageStyle, pub guest_avatar_spacing: f32, pub tree_branch: TreeBranch, diff --git a/zed/src/user.rs b/zed/src/user.rs index 4c59f64b83503..7467b16f9b524 100644 --- a/zed/src/user.rs +++ b/zed/src/user.rs @@ -128,6 +128,7 @@ impl UserStore { } this.update(&mut cx, |this, cx| { + collaborators.sort_by(|a, b| a.user.github_login.cmp(&b.user.github_login)); this.collaborators = 
collaborators.into(); cx.notify(); }); diff --git a/zed/src/workspace.rs b/zed/src/workspace.rs index 89a48a99fdf92..4581e38d908ef 100644 --- a/zed/src/workspace.rs +++ b/zed/src/workspace.rs @@ -7,7 +7,7 @@ use crate::{ editor::Buffer, fs::Fs, language::LanguageRegistry, - people_panel::{JoinWorktree, PeoplePanel}, + people_panel::{JoinWorktree, LeaveWorktree, PeoplePanel, ShareWorktree, UnshareWorktree}, project_browser::ProjectBrowser, rpc, settings::Settings, @@ -43,7 +43,6 @@ use std::{ action!(Open, Arc); action!(OpenPaths, OpenParams); action!(OpenNew, Arc); -action!(ShareWorktree); action!(Save); action!(DebugElements); @@ -56,9 +55,11 @@ pub fn init(cx: &mut MutableAppContext) { cx.add_action(Workspace::save_active_item); cx.add_action(Workspace::debug_elements); cx.add_action(Workspace::open_new_file); - cx.add_action(Workspace::share_worktree); cx.add_action(Workspace::toggle_sidebar_item); + cx.add_action(Workspace::share_worktree); + cx.add_action(Workspace::unshare_worktree); cx.add_action(Workspace::join_worktree); + cx.add_action(Workspace::leave_worktree); cx.add_bindings(vec![ Binding::new("cmd-s", Save, None), Binding::new("cmd-alt-i", DebugElements, None), @@ -175,6 +176,9 @@ pub trait ItemView: View { fn should_activate_item_on_event(_: &Self::Event) -> bool { false } + fn should_close_item_on_event(_: &Self::Event) -> bool { + false + } fn should_update_tab_on_event(_: &Self::Event) -> bool { false } @@ -273,6 +277,10 @@ impl ItemViewHandle for ViewHandle { fn set_parent_pane(&self, pane: &ViewHandle, cx: &mut MutableAppContext) { pane.update(cx, |_, cx| { cx.subscribe(self, |pane, item, event, cx| { + if T::should_close_item_on_event(event) { + pane.close_item(item.id(), cx); + return; + } if T::should_activate_item_on_event(event) { if let Some(ix) = pane.item_index(&item) { pane.activate_item(ix, cx); @@ -814,21 +822,33 @@ impl Workspace { }; } - fn share_worktree(&mut self, _: &ShareWorktree, cx: &mut ViewContext) { + fn 
share_worktree(&mut self, action: &ShareWorktree, cx: &mut ViewContext) { let rpc = self.rpc.clone(); + let remote_id = action.0; cx.spawn(|this, mut cx| { async move { rpc.authenticate_and_connect(&cx).await?; - let share_task = this.update(&mut cx, |this, cx| { - let worktree = this.worktrees.iter().next()?; - worktree.update(cx, |worktree, cx| { - let worktree = worktree.as_local_mut()?; - Some(worktree.share(cx)) - }) + let task = this.update(&mut cx, |this, cx| { + for worktree in &this.worktrees { + let task = worktree.update(cx, |worktree, cx| { + worktree.as_local_mut().and_then(|worktree| { + if worktree.remote_id() == Some(remote_id) { + Some(worktree.share(cx)) + } else { + None + } + }) + }); + + if task.is_some() { + return task; + } + } + None }); - if let Some(share_task) = share_task { + if let Some(share_task) = task { share_task.await?; } @@ -839,6 +859,23 @@ impl Workspace { .detach(); } + fn unshare_worktree(&mut self, action: &UnshareWorktree, cx: &mut ViewContext) { + let remote_id = action.0; + for worktree in &self.worktrees { + if worktree.update(cx, |worktree, cx| { + if let Some(worktree) = worktree.as_local_mut() { + if worktree.remote_id() == Some(remote_id) { + worktree.unshare(cx); + return true; + } + } + false + }) { + break; + } + } + } + fn join_worktree(&mut self, action: &JoinWorktree, cx: &mut ViewContext) { let rpc = self.rpc.clone(); let languages = self.languages.clone(); @@ -862,6 +899,31 @@ impl Workspace { .detach(); } + fn leave_worktree(&mut self, action: &LeaveWorktree, cx: &mut ViewContext) { + let remote_id = action.0; + cx.spawn(|this, mut cx| { + async move { + this.update(&mut cx, |this, cx| { + this.worktrees.retain(|worktree| { + worktree.update(cx, |worktree, cx| { + if let Some(worktree) = worktree.as_remote_mut() { + if worktree.remote_id() == remote_id { + worktree.close_all_buffers(cx); + return false; + } + } + true + }) + }) + }); + + Ok(()) + } + .log_err() + }) + .detach(); + } + fn add_pane(&mut self, 
cx: &mut ViewContext) -> ViewHandle { let pane = cx.add_view(|_| Pane::new(self.settings.clone())); let pane_id = pane.id(); diff --git a/zed/src/worktree.rs b/zed/src/worktree.rs index 521eabb65e03c..803304e6f3e04 100644 --- a/zed/src/worktree.rs +++ b/zed/src/worktree.rs @@ -943,6 +943,10 @@ impl LocalWorktree { } } + pub fn remote_id(&self) -> Option { + *self.remote_id.borrow() + } + pub fn next_remote_id(&self) -> impl Future> { let mut remote_id = self.remote_id.clone(); async move { @@ -1095,6 +1099,23 @@ impl LocalWorktree { }) } + pub fn unshare(&mut self, cx: &mut ModelContext) { + self.share.take(); + let rpc = self.rpc.clone(); + let remote_id = self.remote_id(); + cx.foreground() + .spawn( + async move { + if let Some(worktree_id) = remote_id { + rpc.send(proto::UnshareWorktree { worktree_id }).await?; + } + Ok(()) + } + .log_err(), + ) + .detach() + } + fn share_request(&self, cx: &mut ModelContext) -> Task> { let remote_id = self.next_remote_id(); let snapshot = self.snapshot(); @@ -1229,6 +1250,20 @@ impl RemoteWorktree { }) } + pub fn remote_id(&self) -> u64 { + self.remote_id + } + + pub fn close_all_buffers(&mut self, cx: &mut MutableAppContext) { + for (_, buffer) in self.open_buffers.drain() { + if let RemoteBuffer::Loaded(buffer) = buffer { + if let Some(buffer) = buffer.upgrade(cx) { + buffer.update(cx, |buffer, cx| buffer.close(cx)) + } + } + } + } + fn snapshot(&self) -> Snapshot { self.snapshot.clone() } From d67227177af8bc17a66f5d7df62c1405d64f5dab Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 22 Sep 2021 12:38:31 +0200 Subject: [PATCH 37/43] Allow leaving worktree while opening a buffer --- server/src/rpc.rs | 63 +++++++++++++++++++++++++++++++++++++++++++++ zed/src/worktree.rs | 15 +++++++---- 2 files changed, 73 insertions(+), 5 deletions(-) diff --git a/server/src/rpc.rs b/server/src/rpc.rs index 51df8fea7a32c..ae71f543efdb5 100644 --- a/server/src/rpc.rs +++ b/server/src/rpc.rs @@ -1378,6 +1378,69 @@ mod tests { 
buffer_b.condition(&cx_b, |buf, _| buf.text() == text).await; } + #[gpui::test] + async fn test_leaving_worktree_while_opening_buffer( + mut cx_a: TestAppContext, + mut cx_b: TestAppContext, + ) { + cx_a.foreground().forbid_parking(); + let lang_registry = Arc::new(LanguageRegistry::new()); + + // Connect to a server as 2 clients. + let mut server = TestServer::start().await; + let (client_a, _) = server.create_client(&mut cx_a, "user_a").await; + let (client_b, _) = server.create_client(&mut cx_b, "user_b").await; + + // Share a local worktree as client A + let fs = Arc::new(FakeFs::new()); + fs.insert_tree( + "/dir", + json!({ + ".zed.toml": r#"collaborators = ["user_b"]"#, + "a.txt": "a-contents", + }), + ) + .await; + let worktree_a = Worktree::open_local( + client_a.clone(), + "/dir".as_ref(), + fs, + lang_registry.clone(), + &mut cx_a.to_async(), + ) + .await + .unwrap(); + worktree_a + .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + let worktree_id = worktree_a + .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx)) + .await + .unwrap(); + + // Join that worktree as client B, and see that a guest has joined as client A. 
+ let worktree_b = Worktree::open_remote( + client_b.clone(), + worktree_id, + lang_registry.clone(), + &mut cx_b.to_async(), + ) + .await + .unwrap(); + worktree_a + .condition(&cx_a, |tree, _| tree.peers().len() == 1) + .await; + + let buffer_b = cx_b + .background() + .spawn(worktree_b.update(&mut cx_b, |worktree, cx| worktree.open_buffer("a.txt", cx))); + cx_b.update(|_| drop(worktree_b)); + drop(buffer_b); + worktree_a + .condition(&cx_a, |tree, _| tree.peers().len() == 0) + .await; + } + #[gpui::test] async fn test_peer_disconnection(mut cx_a: TestAppContext, cx_b: TestAppContext) { cx_a.foreground().forbid_parking(); diff --git a/zed/src/worktree.rs b/zed/src/worktree.rs index 803304e6f3e04..a3c5b4e4c47f1 100644 --- a/zed/src/worktree.rs +++ b/zed/src/worktree.rs @@ -1196,12 +1196,11 @@ impl RemoteWorktree { path: &Path, cx: &mut ModelContext, ) -> Task>> { - let handle = cx.handle(); let mut existing_buffer = None; self.open_buffers.retain(|_buffer_id, buffer| { if let Some(buffer) = buffer.upgrade(cx.as_ref()) { if let Some(file) = buffer.read(cx.as_ref()).file() { - if file.worktree_id() == handle.id() && file.path.as_ref() == path { + if file.worktree_id() == cx.model_id() && file.path.as_ref() == path { existing_buffer = Some(buffer); } } @@ -1215,21 +1214,27 @@ impl RemoteWorktree { let replica_id = self.replica_id; let remote_worktree_id = self.remote_id; let path = path.to_string_lossy().to_string(); - cx.spawn(|this, mut cx| async move { + cx.spawn_weak(|this, mut cx| async move { if let Some(existing_buffer) = existing_buffer { Ok(existing_buffer) } else { let entry = this + .upgrade(&cx) + .ok_or_else(|| anyhow!("worktree was closed"))? 
.read_with(&cx, |tree, _| tree.entry_for_path(&path).cloned()) .ok_or_else(|| anyhow!("file does not exist"))?; - let file = File::new(entry.id, handle, entry.path, entry.mtime); - let language = cx.read(|cx| file.select_language(cx)); let response = rpc .request(proto::OpenBuffer { worktree_id: remote_worktree_id as u64, path, }) .await?; + + let this = this + .upgrade(&cx) + .ok_or_else(|| anyhow!("worktree was closed"))?; + let file = File::new(entry.id, this.clone(), entry.path, entry.mtime); + let language = cx.read(|cx| file.select_language(cx)); let remote_buffer = response.buffer.ok_or_else(|| anyhow!("empty buffer"))?; let buffer_id = remote_buffer.id as usize; let buffer = cx.add_model(|cx| { From 78fbd1307adc212beb098838a9881edce35f36a3 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 22 Sep 2021 14:50:32 +0200 Subject: [PATCH 38/43] Remove remote worktrees and close their buffers when host unshares --- server/src/rpc.rs | 90 ++++++++++++++++++++++++++++++++++++++++++++ zed/src/workspace.rs | 24 ++++++++++-- zed/src/worktree.rs | 17 ++++++++- 3 files changed, 127 insertions(+), 4 deletions(-) diff --git a/server/src/rpc.rs b/server/src/rpc.rs index ae71f543efdb5..fec6182fcc1ff 100644 --- a/server/src/rpc.rs +++ b/server/src/rpc.rs @@ -968,10 +968,12 @@ mod tests { editor::{Editor, EditorStyle, Insert}, fs::{FakeFs, Fs as _}, language::LanguageRegistry, + people_panel::JoinWorktree, rpc::{self, Client, Credentials, EstablishConnectionError}, settings, test::FakeHttpClient, user::UserStore, + workspace::Workspace, worktree::Worktree, }; use zrpc::Peer; @@ -1087,6 +1089,94 @@ mod tests { .await; } + #[gpui::test] + async fn test_unshare_worktree(mut cx_a: TestAppContext, mut cx_b: TestAppContext) { + cx_b.update(zed::workspace::init); + let lang_registry = Arc::new(LanguageRegistry::new()); + + // Connect to a server as 2 clients. 
+ let mut server = TestServer::start().await; + let (client_a, _) = server.create_client(&mut cx_a, "user_a").await; + let (client_b, user_store_b) = server.create_client(&mut cx_b, "user_b").await; + let app_state_b = zed::AppState { + rpc: client_b, + user_store: user_store_b, + ..Arc::try_unwrap(cx_b.update(zed::test::test_app_state)) + .ok() + .unwrap() + }; + + cx_a.foreground().forbid_parking(); + + // Share a local worktree as client A + let fs = Arc::new(FakeFs::new()); + fs.insert_tree( + "/a", + json!({ + ".zed.toml": r#"collaborators = ["user_b"]"#, + "a.txt": "a-contents", + "b.txt": "b-contents", + }), + ) + .await; + let worktree_a = Worktree::open_local( + client_a.clone(), + "/a".as_ref(), + fs, + lang_registry.clone(), + &mut cx_a.to_async(), + ) + .await + .unwrap(); + worktree_a + .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + + let remote_worktree_id = worktree_a + .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx)) + .await + .unwrap(); + + let (window_b, workspace_b) = cx_b.add_window(|cx| Workspace::new(&app_state_b, cx)); + cx_b.update(|cx| { + cx.dispatch_action( + window_b, + vec![workspace_b.id()], + &JoinWorktree(remote_worktree_id), + ); + }); + workspace_b + .condition(&cx_b, |workspace, _| workspace.worktrees().len() == 1) + .await; + + let local_worktree_id_b = workspace_b.read_with(&cx_b, |workspace, cx| { + let active_pane = workspace.active_pane().read(cx); + assert!(active_pane.active_item().is_none()); + workspace.worktrees().iter().next().unwrap().id() + }); + workspace_b + .update(&mut cx_b, |worktree, cx| { + worktree.open_entry((local_worktree_id_b, Path::new("a.txt").into()), cx) + }) + .unwrap() + .await; + workspace_b.read_with(&cx_b, |workspace, cx| { + let active_pane = workspace.active_pane().read(cx); + assert!(active_pane.active_item().is_some()); + }); + + worktree_a.update(&mut cx_a, |tree, cx| { + tree.as_local_mut().unwrap().unshare(cx); + }); + workspace_b + 
.condition(&cx_b, |workspace, _| workspace.worktrees().len() == 0) + .await; + workspace_b.read_with(&cx_b, |workspace, cx| { + let active_pane = workspace.active_pane().read(cx); + assert!(active_pane.active_item().is_none()); + }); + } + #[gpui::test] async fn test_propagate_saves_and_fs_changes_in_shared_worktree( mut cx_a: TestAppContext, diff --git a/zed/src/workspace.rs b/zed/src/workspace.rs index 4581e38d908ef..19071b7326d31 100644 --- a/zed/src/workspace.rs +++ b/zed/src/workspace.rs @@ -13,7 +13,7 @@ use crate::{ settings::Settings, user, util::TryFutureExt as _, - worktree::{File, Worktree}, + worktree::{self, File, Worktree}, AppState, Authenticate, }; use anyhow::Result; @@ -886,9 +886,27 @@ impl Workspace { rpc.authenticate_and_connect(&cx).await?; let worktree = Worktree::open_remote(rpc.clone(), worktree_id, languages, &mut cx).await?; - this.update(&mut cx, |workspace, cx| { + this.update(&mut cx, |this, cx| { cx.observe(&worktree, |_, _, cx| cx.notify()).detach(); - workspace.worktrees.insert(worktree); + cx.subscribe(&worktree, move |this, _, event, cx| match event { + worktree::Event::Closed => { + this.worktrees.retain(|worktree| { + worktree.update(cx, |worktree, cx| { + if let Some(worktree) = worktree.as_remote_mut() { + if worktree.remote_id() == worktree_id { + worktree.close_all_buffers(cx); + return false; + } + } + true + }) + }); + + cx.notify(); + } + }) + .detach(); + this.worktrees.insert(worktree); cx.notify(); }); diff --git a/zed/src/worktree.rs b/zed/src/worktree.rs index a3c5b4e4c47f1..de819af1411ca 100644 --- a/zed/src/worktree.rs +++ b/zed/src/worktree.rs @@ -62,8 +62,12 @@ pub enum Worktree { Remote(RemoteWorktree), } +pub enum Event { + Closed, +} + impl Entity for Worktree { - type Event = (); + type Event = Event; fn release(&mut self, cx: &mut MutableAppContext) { match self { @@ -223,6 +227,7 @@ impl Worktree { rpc.subscribe_to_entity(remote_id, cx, Self::handle_update), rpc.subscribe_to_entity(remote_id, cx, 
Self::handle_update_buffer), rpc.subscribe_to_entity(remote_id, cx, Self::handle_buffer_saved), + rpc.subscribe_to_entity(remote_id, cx, Self::handle_unshare), ]; Worktree::Remote(RemoteWorktree { @@ -522,6 +527,16 @@ impl Worktree { Ok(()) } + pub fn handle_unshare( + &mut self, + _: TypedEnvelope, + _: Arc, + cx: &mut ModelContext, + ) -> Result<()> { + cx.emit(Event::Closed); + Ok(()) + } + fn poll_snapshot(&mut self, cx: &mut ModelContext) { match self { Self::Local(worktree) => { From b47422ae289e9636b98cd43ce0cf690289d32a47 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 22 Sep 2021 15:23:54 +0200 Subject: [PATCH 39/43] Maintain connections correctly when leaving or unsharing worktrees --- server/src/rpc/store.rs | 34 ++++++++++++++++++++++++++-------- 1 file changed, 26 insertions(+), 8 deletions(-) diff --git a/server/src/rpc/store.rs b/server/src/rpc/store.rs index cd4fff53ec87a..1d77e6b5effde 100644 --- a/server/src/rpc/store.rs +++ b/server/src/rpc/store.rs @@ -305,16 +305,20 @@ impl Store { } let connection_ids = worktree.connection_ids(); - - if let Some(_) = worktree.share.take() { - for connection_id in &connection_ids { - if let Some(connection) = self.connections.get_mut(connection_id) { + let collaborator_ids = worktree.collaborator_user_ids.clone(); + if let Some(share) = worktree.share.take() { + for connection_id in share.guest_connection_ids.into_keys() { + if let Some(connection) = self.connections.get_mut(&connection_id) { connection.worktrees.remove(&worktree_id); } } + + #[cfg(test)] + self.check_invariants(); + Ok(UnsharedWorktree { connection_ids, - collaborator_ids: worktree.collaborator_user_ids.clone(), + collaborator_ids, }) } else { Err(anyhow!("worktree is not shared"))? 
@@ -352,9 +356,13 @@ impl Store { } share.active_replica_ids.insert(replica_id); share.guest_connection_ids.insert(connection_id, replica_id); + + #[cfg(test)] + self.check_invariants(); + Ok(JoinedWorktree { replica_id, - worktree, + worktree: &self.worktrees[&worktree_id], }) } @@ -367,9 +375,19 @@ impl Store { let share = worktree.share.as_mut()?; let replica_id = share.guest_connection_ids.remove(&connection_id)?; share.active_replica_ids.remove(&replica_id); + + let connection = self.connections.get_mut(&connection_id)?; + connection.worktrees.remove(&worktree_id); + + let connection_ids = worktree.connection_ids(); + let collaborator_ids = worktree.collaborator_user_ids.clone(); + + #[cfg(test)] + self.check_invariants(); + Some(LeftWorktree { - connection_ids: worktree.connection_ids(), - collaborator_ids: worktree.collaborator_user_ids.clone(), + connection_ids, + collaborator_ids, }) } From 3e65fb426715f7309b6ade4d967b1c80e99ec7c3 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 22 Sep 2021 15:33:13 +0200 Subject: [PATCH 40/43] Leave joined worktrees when guest loses connection --- server/src/rpc/store.rs | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/server/src/rpc/store.rs b/server/src/rpc/store.rs index 1d77e6b5effde..b33cd135589d5 100644 --- a/server/src/rpc/store.rs +++ b/server/src/rpc/store.rs @@ -81,7 +81,7 @@ impl Store { &mut self, connection_id: ConnectionId, ) -> tide::Result { - let connection = if let Some(connection) = self.connections.get(&connection_id) { + let connection = if let Some(connection) = self.connections.remove(&connection_id) { connection } else { return Err(anyhow!("no such connection"))?; @@ -109,18 +109,17 @@ impl Store { .collaborator_ids .extend(worktree.collaborator_user_ids.iter().copied()); result.hosted_worktrees.insert(worktree_id, worktree); - } else { - if let Some(worktree) = self.worktrees.get(&worktree_id) { - result - .guest_worktree_ids - 
.insert(worktree_id, worktree.connection_ids()); - result - .collaborator_ids - .extend(worktree.collaborator_user_ids.iter().copied()); - } + } else if let Some(worktree) = self.leave_worktree(connection_id, worktree_id) { + result + .guest_worktree_ids + .insert(worktree_id, worktree.connection_ids); + result.collaborator_ids.extend(worktree.collaborator_ids); } } + #[cfg(test)] + self.check_invariants(); + Ok(result) } @@ -376,8 +375,9 @@ impl Store { let replica_id = share.guest_connection_ids.remove(&connection_id)?; share.active_replica_ids.remove(&replica_id); - let connection = self.connections.get_mut(&connection_id)?; - connection.worktrees.remove(&worktree_id); + if let Some(connection) = self.connections.get_mut(&connection_id) { + connection.worktrees.remove(&worktree_id); + } let connection_ids = worktree.connection_ids(); let collaborator_ids = worktree.collaborator_user_ids.clone(); From 257744ac36c3dac2dfcfebe9944239c1d134d7e1 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 22 Sep 2021 16:04:44 +0200 Subject: [PATCH 41/43] Fix resolution of extends directive when children are unresolved Co-Authored-By: Nathan Sobo --- zed/src/theme/resolution.rs | 33 +++++++++++++++++++++++++++------ 1 file changed, 27 insertions(+), 6 deletions(-) diff --git a/zed/src/theme/resolution.rs b/zed/src/theme/resolution.rs index fd3864e274af2..acebf72b86120 100644 --- a/zed/src/theme/resolution.rs +++ b/zed/src/theme/resolution.rs @@ -237,9 +237,12 @@ impl Tree { fn update_resolved(&self) { match &mut *self.0.borrow_mut() { Node::Object { - resolved, children, .. + resolved, + base, + children, + .. } => { - *resolved = children.values().all(|c| c.is_resolved()); + *resolved = base.is_none() && children.values().all(|c| c.is_resolved()); } Node::Array { resolved, children, .. 
@@ -261,6 +264,9 @@ impl Tree { if tree.is_resolved() { while let Some(parent) = tree.parent() { parent.update_resolved(); + if !parent.is_resolved() { + break; + } tree = parent; } } @@ -330,9 +336,10 @@ impl Tree { made_progress = true; } - if let Node::Object { resolved, .. } = &mut *self.0.borrow_mut() { + if let Node::Object { resolved, base, .. } = &mut *self.0.borrow_mut() { if has_base { if resolved_base.is_some() { + base.take(); *resolved = true; } else { unresolved.push(self.clone()); @@ -341,6 +348,8 @@ impl Tree { *resolved = true; } } + } else if base.is_some() { + unresolved.push(self.clone()); } Ok(made_progress) @@ -427,6 +436,7 @@ mod test { fn test_references() { let json = serde_json::json!({ "a": { + "extends": "$g", "x": "$b.d" }, "b": { @@ -436,6 +446,9 @@ mod test { "e": { "extends": "$a", "f": "1" + }, + "g": { + "h": 2 } }); @@ -443,19 +456,27 @@ mod test { resolve_references(json).unwrap(), serde_json::json!({ "a": { - "x": "1" + "extends": "$g", + "x": "1", + "h": 2 }, "b": { "c": { - "x": "1" + "extends": "$g", + "x": "1", + "h": 2 }, "d": "1" }, "e": { "extends": "$a", "f": "1", - "x": "1" + "x": "1", + "h": 2 }, + "g": { + "h": 2 + } }) ) } From 23d77e2b9fe53bf57fc5b8b7d1d437a8d4b11729 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 22 Sep 2021 17:02:50 +0200 Subject: [PATCH 42/43] Refine people panel styling Co-Authored-By: Nathan Sobo --- gpui/src/elements/container.rs | 5 + zed/assets/themes/_base.toml | 33 ++++--- zed/src/people_panel.rs | 169 +++++++++++++++++---------------- zed/src/theme.rs | 26 ++--- 4 files changed, 126 insertions(+), 107 deletions(-) diff --git a/gpui/src/elements/container.rs b/gpui/src/elements/container.rs index abbafe4d034e7..dd58cf07398ed 100644 --- a/gpui/src/elements/container.rs +++ b/gpui/src/elements/container.rs @@ -57,6 +57,11 @@ impl Container { self } + pub fn with_margin_right(mut self, margin: f32) -> Self { + self.style.margin.right = margin; + self + } + pub fn 
with_horizontal_padding(mut self, padding: f32) -> Self { self.style.padding.left = padding; self.style.padding.right = padding; diff --git a/zed/assets/themes/_base.toml b/zed/assets/themes/_base.toml index 4ee79d60d4bf8..5938032c2c8c0 100644 --- a/zed/assets/themes/_base.toml +++ b/zed/assets/themes/_base.toml @@ -125,31 +125,38 @@ color = "$text.1.color" [people_panel] extends = "$panel" +host_row_height = 28 host_avatar = { corner_radius = 10, width = 20 } -host_username = { extends = "$text.0", padding.left = 5 } +host_username = { extends = "$text.0", padding.left = 8 } +tree_branch_width = 1 +tree_branch_color = "$surface.2" + +[people_panel.worktree] +height = 24 +padding = { left = 8 } guest_avatar = { corner_radius = 8, width = 16 } -guest_avatar_spacing = 8 +guest_avatar_spacing = 4 -[people_panel.unshared_worktree] +[people_panel.worktree.name] extends = "$text.1" -padding = { left = 5 } +margin = { right = 6 } + +[people_panel.unshared_worktree] +extends = "$people_panel.worktree" [people_panel.hovered_unshared_worktree] -extends = "$people_panel.shared_worktree" +extends = "$people_panel.unshared_worktree" background = "$state.hover" corner_radius = 6 [people_panel.shared_worktree] -extends = "$people_panel.unshared_worktree" -color = "$text.0.color" +extends = "$people_panel.worktree" +name.color = "$text.0.color" [people_panel.hovered_shared_worktree] -extends = "$people_panel.hovered_unshared_worktree" -color = "$text.0.color" - -[people_panel.tree_branch] -width = 1 -color = "$surface.2" +extends = "$people_panel.shared_worktree" +background = "$state.hover" +corner_radius = 6 [selector] background = "$surface.0" diff --git a/zed/src/people_panel.rs b/zed/src/people_panel.rs index 78a0ec153fd46..10d3bdaa0274c 100644 --- a/zed/src/people_panel.rs +++ b/zed/src/people_panel.rs @@ -73,31 +73,32 @@ impl PeoplePanel { let theme = &theme.people_panel; let worktree_count = collaborator.worktrees.len(); let font_cache = cx.font_cache(); - let line_height 
= theme.unshared_worktree.text.line_height(font_cache); - let cap_height = theme.unshared_worktree.text.cap_height(font_cache); - let baseline_offset = theme.unshared_worktree.text.baseline_offset(font_cache); - let tree_branch = theme.tree_branch; + let line_height = theme.unshared_worktree.name.text.line_height(font_cache); + let cap_height = theme.unshared_worktree.name.text.cap_height(font_cache); + let baseline_offset = theme + .unshared_worktree + .name + .text + .baseline_offset(font_cache) + + (theme.unshared_worktree.height - line_height) / 2.; + let tree_branch_width = theme.tree_branch_width; + let tree_branch_color = theme.tree_branch_color; let host_avatar_height = theme .host_avatar .width .or(theme.host_avatar.height) .unwrap_or(0.); - let guest_avatar_height = theme - .guest_avatar - .width - .or(theme.guest_avatar.height) - .unwrap_or(0.); Flex::column() .with_child( Flex::row() - .with_children( - collaborator - .user - .avatar - .clone() - .map(|avatar| Image::new(avatar).with_style(theme.host_avatar).boxed()), - ) + .with_children(collaborator.user.avatar.clone().map(|avatar| { + Image::new(avatar) + .with_style(theme.host_avatar) + .aligned() + .left() + .boxed() + })) .with_child( Label::new( collaborator.user.github_login.clone(), @@ -107,10 +108,10 @@ impl PeoplePanel { .with_style(theme.host_username.container) .aligned() .left() - .constrained() - .with_height(host_avatar_height) .boxed(), ) + .constrained() + .with_height(theme.host_row_height) .boxed(), ) .with_children( @@ -120,47 +121,45 @@ impl PeoplePanel { .enumerate() .map(|(ix, worktree)| { let worktree_id = worktree.id; + Flex::row() .with_child( - ConstrainedBox::new( - Canvas::new(move |bounds, _, cx| { - let start_x = bounds.min_x() + (bounds.width() / 2.) 
- - (tree_branch.width / 2.); - let end_x = bounds.max_x(); - let start_y = bounds.min_y(); - let end_y = - bounds.min_y() + baseline_offset - (cap_height / 2.); + Canvas::new(move |bounds, _, cx| { + let start_x = bounds.min_x() + (bounds.width() / 2.) + - (tree_branch_width / 2.); + let end_x = bounds.max_x(); + let start_y = bounds.min_y(); + let end_y = + bounds.min_y() + baseline_offset - (cap_height / 2.); - cx.scene.push_quad(gpui::Quad { - bounds: RectF::from_points( - vec2f(start_x, start_y), - vec2f( - start_x + tree_branch.width, - if ix + 1 == worktree_count { - end_y - } else { - bounds.max_y() - }, - ), - ), - background: Some(tree_branch.color), - border: gpui::Border::default(), - corner_radius: 0., - }); - cx.scene.push_quad(gpui::Quad { - bounds: RectF::from_points( - vec2f(start_x, end_y), - vec2f(end_x, end_y + tree_branch.width), + cx.scene.push_quad(gpui::Quad { + bounds: RectF::from_points( + vec2f(start_x, start_y), + vec2f( + start_x + tree_branch_width, + if ix + 1 == worktree_count { + end_y + } else { + bounds.max_y() + }, ), - background: Some(tree_branch.color), - border: gpui::Border::default(), - corner_radius: 0., - }); - }) - .boxed(), - ) - .with_width(20.) 
- .with_height(line_height) + ), + background: Some(tree_branch_color), + border: gpui::Border::default(), + corner_radius: 0., + }); + cx.scene.push_quad(gpui::Quad { + bounds: RectF::from_points( + vec2f(start_x, end_y), + vec2f(end_x, end_y + tree_branch_width), + ), + background: Some(tree_branch_color), + border: gpui::Border::default(), + corner_radius: 0., + }); + }) + .constrained() + .with_width(host_avatar_height) .boxed(), ) .with_child({ @@ -184,36 +183,38 @@ impl PeoplePanel { (true, true) => &theme.hovered_shared_worktree, }; - Container::new( - Flex::row() - .with_child( - Label::new( - worktree.root_name.clone(), - style.text.clone(), - ) - .aligned() - .left() - .constrained() - .with_height(guest_avatar_height) - .boxed(), + Flex::row() + .with_child( + Label::new( + worktree.root_name.clone(), + style.name.text.clone(), ) - .with_children(worktree.guests.iter().filter_map( - |participant| { - participant.avatar.clone().map(|avatar| { - Image::new(avatar) - .with_style(theme.guest_avatar) - .contained() - .with_margin_left( - theme.guest_avatar_spacing, - ) - .boxed() - }) - }, - )) + .aligned() + .left() + .contained() + .with_style(style.name.container) .boxed(), - ) - .with_style(style.container) - .boxed() + ) + .with_children(worktree.guests.iter().filter_map( + |participant| { + participant.avatar.clone().map(|avatar| { + Image::new(avatar) + .with_style(style.guest_avatar) + .aligned() + .left() + .contained() + .with_margin_right( + style.guest_avatar_spacing, + ) + .boxed() + }) + }, + )) + .contained() + .with_style(style.container) + .constrained() + .with_height(style.height) + .boxed() }, ) .with_cursor_style(if is_host || is_shared { @@ -237,6 +238,8 @@ impl PeoplePanel { .expanded(1.0) .boxed() }) + .constrained() + .with_height(theme.unshared_worktree.height) .boxed() }), ) diff --git a/zed/src/theme.rs b/zed/src/theme.rs index 568bb29f5aad3..8b43b09f13d28 100644 --- a/zed/src/theme.rs +++ b/zed/src/theme.rs @@ -109,21 +109,25 @@ 
pub struct ChatPanel { pub struct PeoplePanel { #[serde(flatten)] pub container: ContainerStyle, + pub host_row_height: f32, pub host_avatar: ImageStyle, pub host_username: ContainedText, - pub shared_worktree: ContainedText, - pub hovered_shared_worktree: ContainedText, - pub unshared_worktree: ContainedText, - pub hovered_unshared_worktree: ContainedText, - pub guest_avatar: ImageStyle, - pub guest_avatar_spacing: f32, - pub tree_branch: TreeBranch, + pub tree_branch_width: f32, + pub tree_branch_color: Color, + pub shared_worktree: WorktreeRow, + pub hovered_shared_worktree: WorktreeRow, + pub unshared_worktree: WorktreeRow, + pub hovered_unshared_worktree: WorktreeRow, } -#[derive(Copy, Clone, Deserialize)] -pub struct TreeBranch { - pub width: f32, - pub color: Color, +#[derive(Deserialize)] +pub struct WorktreeRow { + #[serde(flatten)] + pub container: ContainerStyle, + pub height: f32, + pub name: ContainedText, + pub guest_avatar: ImageStyle, + pub guest_avatar_spacing: f32, } #[derive(Deserialize)] From 6120ce37476c52928436b9482878530e893236ac Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 22 Sep 2021 17:09:24 +0200 Subject: [PATCH 43/43] Move people panel up Co-Authored-By: Nathan Sobo --- zed/src/workspace.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/zed/src/workspace.rs b/zed/src/workspace.rs index 19071b7326d31..df4afb020e324 100644 --- a/zed/src/workspace.rs +++ b/zed/src/workspace.rs @@ -375,6 +375,13 @@ impl Workspace { ); let mut right_sidebar = Sidebar::new(Side::Right); + right_sidebar.add_item( + "icons/user-16.svg", + cx.add_view(|cx| { + PeoplePanel::new(app_state.user_store.clone(), app_state.settings.clone(), cx) + }) + .into(), + ); right_sidebar.add_item( "icons/comment-16.svg", cx.add_view(|cx| { @@ -387,13 +394,6 @@ impl Workspace { }) .into(), ); - right_sidebar.add_item( - "icons/user-16.svg", - cx.add_view(|cx| { - PeoplePanel::new(app_state.user_store.clone(), 
app_state.settings.clone(), cx) - }) - .into(), - ); let mut current_user = app_state.user_store.read(cx).watch_current_user().clone(); let mut connection_status = app_state.rpc.status().clone();