diff --git a/Cargo.lock b/Cargo.lock
index 82bf2983ae1..ca7d898b2c0 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4891,6 +4891,7 @@ dependencies = [
  "hex",
  "libc",
  "miette",
+ "minicbor",
  "ockam",
  "ockam_api",
  "ockam_core",
diff --git a/implementations/rust/ockam/ockam_app_lib/Cargo.toml b/implementations/rust/ockam/ockam_app_lib/Cargo.toml
index 69433c56be2..22eb0c1e774 100644
--- a/implementations/rust/ockam/ockam_app_lib/Cargo.toml
+++ b/implementations/rust/ockam/ockam_app_lib/Cargo.toml
@@ -33,6 +33,7 @@ futures = { version = "0.3.28", default-features = false }
 hex = { version = "0.4.3", default-features = false, features = ["alloc", "serde"] }
 libc = "0.2"
 miette = { version = "5.10.0", features = ["fancy-no-backtrace"] }
+minicbor = { version = "0.20.0", features = ["alloc", "derive"] }
 ockam = { path = "../ockam", version = "^0.103.0", features = ["software_vault"] }
 ockam_api = { path = "../ockam_api", version = "0.46.0", features = ["std"] }
 ockam_core = { path = "../ockam_core", version = "^0.93.0" }
diff --git a/implementations/rust/ockam/ockam_app_lib/src/api/functions.rs b/implementations/rust/ockam/ockam_app_lib/src/api/functions.rs
index ab2a63330c2..b2dad61cda8 100644
--- a/implementations/rust/ockam/ockam_app_lib/src/api/functions.rs
+++ b/implementations/rust/ockam/ockam_app_lib/src/api/functions.rs
@@ -53,12 +53,8 @@ extern "C" fn initialize_application(
     }
 
     let result = AppState::new(
-        Some(super::state::rust::ApplicationStateCallback::new(
-            application_state_callback,
-        )),
-        Some(super::notification::rust::NotificationCallback::new(
-            notification_callback,
-        )),
+        super::state::rust::ApplicationStateCallback::new(application_state_callback),
+        super::notification::rust::NotificationCallback::new(notification_callback),
     );
 
     let app_state = match result {
@@ -202,7 +198,7 @@ extern "C" fn disable_accepted_service(invitation_id: *const c_char) {
     };
     let app_state = unsafe { APPLICATION_STATE.as_ref() }.expect(ERROR_NOT_INITIALIZED);
     app_state.context().runtime().spawn(async move {
-        let result = app_state.disconnect_tcp_inlet(&invitation_id).await;
+        let result = app_state.disable_tcp_inlet(&invitation_id).await;
         if let Err(err) = result {
             error!(?err, "Couldn't disable the service");
         }
diff --git a/implementations/rust/ockam/ockam_app_lib/src/incoming_services/commands.rs b/implementations/rust/ockam/ockam_app_lib/src/incoming_services/commands.rs
new file mode 100644
index 00000000000..b3fa0f136c4
--- /dev/null
+++ b/implementations/rust/ockam/ockam_app_lib/src/incoming_services/commands.rs
@@ -0,0 +1,222 @@
+use crate::background_node::BackgroundNodeClient;
+use crate::incoming_services::state::{IncomingService, Port};
+use crate::state::AppState;
+use miette::IntoDiagnostic;
+use ockam_api::address::get_free_address;
+use ockam_api::cli_state::StateDirTrait;
+use ockam_api::nodes::service::portals::Inlets;
+use ockam_api::ConnectionStatus;
+use ockam_core::api::Reply;
+use ockam_multiaddr::MultiAddr;
+use std::str::FromStr;
+use std::sync::Arc;
+use std::time::Duration;
+use tracing::{debug, info, warn};
+
+impl AppState {
+    pub(crate) async fn refresh_inlets(&self) -> crate::Result<()> {
+        info!("Refreshing inlets");
+
+        // for each service, check whether the corresponding node is running;
+        // if not, delete the node and re-create the inlet
+
+        let services_arc = self.incoming_services();
+        let services = {
+            // reduce locking as much as possible to keep the UI consistently responsive
+            services_arc.read().await.services.clone()
+        };
+        if services.is_empty() {
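+            // return early to avoid instantiating the background node client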
+            debug!("No incoming services, skipping inlets refresh");
+            return Ok(());
+        }
+
+        let background_node_client = self.background_node_client().await;
+        for service in services {
+            let result = self
+                .refresh_inlet(background_node_client.clone(), &service)
+                .await;
+            {
+                // we want to reduce the scope of the guard as much as possible
+                let mut guard = services_arc.write().await;
+                match result {
+                    Ok(port) => {
+                        if let Some(service) = guard.find_mut_by_id(service.id()) {
+                            service.set_port(port)
+                        }
+                    }
+                    Err(err) => {
+                        warn!(%err, "Failed to refresh TCP inlet for accepted invitation");
+                        if let Some(service) = guard.find_mut_by_id(service.id()) {
+                            service.set_port(None)
+                        }
+                    }
+                }
+            }
+            self.publish_state().await;
+        }
+
+        info!("Inlets refreshed");
+        Ok(())
+    }
+
+    async fn refresh_inlet(
+        &self,
+        background_node_client: Arc<dyn BackgroundNodeClient>,
+        service: &IncomingService,
+    ) -> crate::Result<Option<Port>> {
+        let inlet_node_name = &service.local_node_name();
+        debug!(node = %inlet_node_name, "Checking node status");
+        if !service.enabled() {
+            debug!(node = %inlet_node_name, "TCP inlet is disabled by the user, deleting the node");
+            let _ = self.delete_background_node(inlet_node_name).await;
+            return Ok(None);
+        }
+
+        if self.is_connected(service, inlet_node_name).await {
+            Ok(None)
+        } else {
+            self.create_inlet(background_node_client, service)
+                .await
+                .map(Some)
+        }
+    }
+
+    /// Returns true if the inlet is already connected to the destination node;
+    /// returns false if any error occurs
+    async fn is_connected(&self, service: &IncomingService, inlet_node_name: &str) -> bool {
+        if self.state().await.nodes.exists(inlet_node_name) {
+            if let Ok(mut inlet_node) = self.background_node(inlet_node_name).await {
+                inlet_node.set_timeout(Duration::from_secs(5));
+                if let Ok(Reply::Successful(inlet)) = inlet_node
+                    .show_inlet(&self.context(), service.inlet_name())
+                    .await
+                {
+                    if inlet.status == ConnectionStatus::Up {
+                        debug!(node = %inlet_node_name, alias = %inlet.alias, "TCP inlet is already up");
+                        return true;
+                    }
+                }
+            }
+        }
+        false
+    }
+
+    /// Creates the TCP inlet for the accepted invitation.
+    /// Returns the inlet [`Port`] if successful
+    async fn create_inlet(
+        &self,
+        background_node_client: Arc<dyn BackgroundNodeClient>,
+        service: &IncomingService,
+    ) -> crate::Result<Port> {
+        debug!(
+            service_name = service.name(),
+            "Creating TCP inlet for accepted invitation"
+        );
+
+        let local_node_name = service.local_node_name();
+        background_node_client
+            .projects()
+            .enroll(
+                &local_node_name,
+                &service.enrollment_ticket().hex_encoded()?,
+            )
+            .await?;
+
+        // Recreate the node using the trust context
+        debug!(node = %local_node_name, "Creating node to host TCP inlet");
+        let _ = self.delete_background_node(&local_node_name).await;
+        background_node_client
+            .nodes()
+            .create(&local_node_name)
+            .await?;
+        tokio::time::sleep(Duration::from_millis(250)).await;
+
+        let mut inlet_node = self.background_node(&local_node_name).await?;
+        inlet_node.set_timeout(Duration::from_secs(5));
+
+        let bind_address = match service.address() {
+            Some(address) => address,
+            None => get_free_address()?,
+        };
+
+        inlet_node
+            .create_inlet(
+                &self.context(),
+                &bind_address.to_string(),
+                &MultiAddr::from_str(&service.service_route()).into_diagnostic()?,
+                &Some(service.inlet_name().to_string()),
+                &None,
+                Duration::from_secs(5),
+            )
+            .await?;
+        Ok(bind_address.port())
+    }
+
+    pub(crate) async fn disable_tcp_inlet(&self, invitation_id: &str) -> crate::Result<()> {
+        // first change the in-memory state
+        let service = {
+            // we want to reduce the scope of the guard as much as possible
+            let incoming_services_arc = self.incoming_services();
+            let mut writer = incoming_services_arc.write().await;
+            let mut service = writer.find_mut_by_id(invitation_id);
+            if let Some(service) = service.as_mut() {
+                if !service.enabled() {
+                    debug!(node = %service.local_node_name(), alias = %service.name(), "TCP inlet was already disconnected");
+                    return Ok(());
+                }
+                service.disable();
+                service.clone()
+            } else {
+                return Ok(());
+            }
+        };
+
+        // the following operations are async: give the user feedback as soon as possible
+        self.publish_state().await;
+        // change the persistent state
+        self.model_mut(|model| {
+            let service = model.upsert_incoming_service(invitation_id);
+            service.enabled = false;
+        })
+        .await?;
+        self.background_node(&service.local_node_name())
+            .await?
+            .delete_inlet(&self.context(), service.inlet_name())
+            .await?;
+
+        Ok(())
+    }
+
+    pub(crate) async fn enable_tcp_inlet(&self, invitation_id: &str) -> crate::Result<()> {
+        // first change the in-memory state
+        let changed = {
+            // we want to reduce the scope of the guard as much as possible
+            let incoming_services_arc = self.incoming_services();
+            let mut writer = incoming_services_arc.write().await;
+            let mut service = writer.find_mut_by_id(invitation_id);
+            if let Some(service) = service.as_mut() {
+                if service.enabled() {
+                    debug!(node = %service.local_node_name(), alias = %service.name(), "TCP inlet was already enabled");
+                    return Ok(());
+                }
+                service.enable();
+                info!(node = %service.local_node_name(), alias = %service.name(), "Enabled TCP inlet");
+                true
+            } else {
+                false
+            }
+        };
+
+        if changed {
+            // the following operations are async: give the user feedback as soon as possible
+            self.publish_state().await;
+            // change the persistent state
+            self.model_mut(|model| {
+                let service = model.upsert_incoming_service(invitation_id);
+                service.enabled = true;
+            })
+            .await?;
+        }
+        Ok(())
+    }
+}
diff --git a/implementations/rust/ockam/ockam_app_lib/src/incoming_services/mod.rs b/implementations/rust/ockam/ockam_app_lib/src/incoming_services/mod.rs
new file mode 100644
index 00000000000..2683298be5f
--- /dev/null
+++ b/implementations/rust/ockam/ockam_app_lib/src/incoming_services/mod.rs
@@ -0,0 +1,5 @@
+mod commands;
+mod state;
+
+pub use state::IncomingServicesState;
+pub use state::PersistentIncomingServiceState;
diff --git a/implementations/rust/ockam/ockam_app_lib/src/incoming_services/state.rs b/implementations/rust/ockam/ockam_app_lib/src/incoming_services/state.rs
new file mode 100644
index 00000000000..313d392c75a
--- /dev/null
+++ b/implementations/rust/ockam/ockam_app_lib/src/incoming_services/state.rs
@@ -0,0 +1,413 @@
+use crate::state::{AppState, ModelState};
+use minicbor::{Decode, Encode};
+use ockam::identity::Identifier;
+use ockam_api::cloud::share::InvitationWithAccess;
+use ockam_api::identity::EnrollmentTicket;
+use serde::{Deserialize, Serialize};
+use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+use tracing::warn;
+
+/// A socket port number
+pub type Port = u16;
+
+#[derive(Clone, Debug, Decode, Encode, Serialize, Deserialize, PartialEq)]
+#[rustfmt::skip]
+#[cbor(map)]
+pub struct PersistentIncomingServiceState {
+    #[n(1)] pub(crate) invitation_id: String,
+    #[n(2)] pub(crate) enabled: bool,
+    #[n(3)] pub(crate) name: Option<String>,
+}
+
+#[derive(Clone, Debug, Default)]
+pub struct IncomingServicesState {
+    pub(crate) services: Vec<IncomingService>,
+}
+
+impl IncomingServicesState {
+    pub(crate) fn find_by_id(&self, id: &str) -> Option<&IncomingService> {
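+        // invitation ids are unique, so the first match is the only possible match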
+        self.services.iter().find(|s| s.id == id)
+    }
+
+    pub(crate) fn find_mut_by_id(&mut self, id: &str) -> Option<&mut IncomingService> {
+        self.services.iter_mut().find(|s| s.id == id)
+    }
+}
+
+impl ModelState {
+    pub(crate) fn upsert_incoming_service(
+        &mut self,
+        id: &str,
+    ) -> &mut PersistentIncomingServiceState {
+        match self
+            .incoming_services
+            .iter_mut()
+            .position(|service| service.invitation_id == id)
+        {
+            // we have to use an index, see https://github.com/rust-lang/rust/issues/21906
+            Some(index) => &mut self.incoming_services[index],
+            None => {
+                self.incoming_services.push(PersistentIncomingServiceState {
+                    invitation_id: id.to_string(),
+                    enabled: true,
+                    name: None,
+                });
+                self.incoming_services.last_mut().unwrap()
+            }
+        }
+    }
+}
+
+impl AppState {
+    pub async fn load_services_from_invitations(
+        &self,
+        accepted_invitations: Vec<InvitationWithAccess>,
+    ) {
+        let incoming_services_arc = self.incoming_services();
+        let mut guard = incoming_services_arc.write().await;
+        for invite in accepted_invitations {
+            // during the synchronization we only need to add new services
+            if guard.find_by_id(&invite.invitation.id).is_some() {
+                continue;
+            }
+
+            let service_access_details = match invite.service_access_details {
+                None => {
+                    warn!(
+                        "No service access details found for accepted invitation {}",
+                        invite.invitation.id
+                    );
+                    continue;
+                }
+                Some(service_access_details) => service_access_details,
+            };
+
+            let mut enabled = true;
+            let mut name = None;
+
+            if let Some(state) = self
+                .model(|m| {
+                    m.incoming_services
+                        .iter()
+                        .find(|s| s.invitation_id == invite.invitation.id)
+                        .cloned()
+                })
+                .await
+            {
+                enabled = state.enabled;
+                name = state.name.clone();
+            }
+
+            let original_name = match service_access_details.service_name() {
+                Ok(name) => name,
+                Err(err) => {
+                    warn!(%err, "Failed to get service name from access details");
+                    continue;
+                }
+            };
+
+            let mut ticket = match service_access_details.enrollment_ticket() {
+                Ok(ticket) => ticket,
+                Err(err) => {
+                    warn!(%err, "Failed to parse enrollment ticket from access details");
+                    continue;
+                }
+            };
+
+            let project = if let Some(project) = ticket.project.as_mut() {
+                // to avoid conflicts with the 'default' project name
+                project.name = project.id.clone();
+                project
+            } else {
+                warn!("No project found in enrollment ticket");
+                continue;
+            };
+
+            guard.services.push(IncomingService::new(
+                invite.invitation.id,
+                name.unwrap_or_else(|| original_name.clone()),
+                None,
+                enabled,
+                project.id.clone(),
+                service_access_details.shared_node_identity,
+                original_name,
+                ticket,
+            ));
+        }
+    }
+}
+
+#[derive(Clone, Debug)]
+/// This structure represents the live information about an incoming service.
+/// This state is a reflection of three sources of truth:
+/// - an accepted invitation, which contains the service access details
+///   as well as the id and the default name
+/// - the live inlet status, which contains the port number (when available)
+/// - the persistent state, which contains the user-defined name and the enabled status
+pub struct IncomingService {
+    // it's assumed that the id is also the accepted invitation id
+    id: String,
+    // user-defined name; by default it's the same as the original name
+    name: String,
+    // the current port number of the inlet;
+    // it also reflects whether the inlet is connected to the destination node
+    port: Option<Port>,
+    // whether the service should be enabled or not; this drives the inlet
+    // and may not reflect the current status
+    enabled: bool,
+    // all remaining fields were extracted from the access details
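+    // the project id of the target service, used to build both the route and the node name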
+    project_id: String,
+    // the identity identifier of the destination node, used to reconstruct the full route
+    shared_node_identifier: Identifier,
+    // this is used as the outlet service name, and it's needed
+    // to reconstruct the full route
+    original_name: String,
+    // this enrollment ticket is modified to avoid conflicts with the 'default' project name:
+    // the project name is reset to the project id
+    enrollment_ticket: EnrollmentTicket,
+}
+
+impl IncomingService {
+    #[allow(clippy::too_many_arguments)]
+    pub fn new(
+        id: String,
+        name: String,
+        port: Option<Port>,
+        enabled: bool,
+        project_id: String,
+        shared_node_identifier: Identifier,
+        original_name: String,
+        enrollment_ticket: EnrollmentTicket,
+    ) -> Self {
+        Self {
+            id,
+            name,
+            port,
+            enabled,
+            project_id,
+            shared_node_identifier,
+            original_name,
+            enrollment_ticket,
+        }
+    }
+
+    /// This is the id of the service as well as of the related invitation
+    pub fn id(&self) -> &str {
+        &self.id
+    }
+
+    /// This is the user-defined name of the service
+    pub fn name(&self) -> &str {
+        &self.name
+    }
+
+    /// The port number of the inlet, if the service is connected to the destination node
+    pub fn port(&self) -> Option<Port> {
+        self.port
+    }
+
+    /// The address of the inlet, if the service is connected to the destination node
+    pub fn address(&self) -> Option<SocketAddr> {
+        self.port
+            .map(|port| SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), port))
+    }
+
+    /// Whether the service is enabled; this may not reflect the current state
+    pub fn enabled(&self) -> bool {
+        self.enabled
+    }
+
+    pub fn set_port(&mut self, port: Option<Port>) {
+        self.port = port;
+    }
+
+    pub fn enable(&mut self) {
+        self.enabled = true;
+    }
+
+    pub fn disable(&mut self) {
+        self.enabled = false;
+        self.port = None;
+    }
+
+    /// Returns the enrollment ticket; to avoid conflicts with the 'default' project name,
+    /// the project name is reset to the project id
+    pub fn enrollment_ticket(&self) -> &EnrollmentTicket {
+        &self.enrollment_ticket
+    }
+
+    /// The relay name within the target project
+    pub fn relay_name(&self) -> String {
+        let bare_relay_name = self.shared_node_identifier.to_string();
+        format!("forward_to_{bare_relay_name}")
+    }
+
+    /// Returns the full route to the outlet service
+    pub fn service_route(&self) -> String {
+        let project_id = &self.project_id;
+        let relay_name = self.relay_name();
+        let service_name = &self.original_name;
+        format!("/project/{project_id}/service/{relay_name}/secure/api/service/{service_name}")
+    }
+
+    /// The name of the node that hosts the inlet
+    pub fn local_node_name(&self) -> String {
+        let project_id = &self.project_id;
+        let id = &self.id;
+        format!("ockam_app_{project_id}_{id}")
+    }
+
+    /// Returns the name of the inlet within the node; for now it's a constant
+    pub fn inlet_name(&self) -> &str {
+        "app-inlet"
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::incoming_services::PersistentIncomingServiceState;
+    use crate::state::AppState;
+    use ockam::identity::{Identifier, OneTimeCode};
+    use ockam::Context;
+    use ockam_api::cli_state::CliState;
+    use ockam_api::cloud::share::{
+        InvitationWithAccess, ReceivedInvitation, RoleInShare, ServiceAccessDetails, ShareScope,
+    };
+    use ockam_api::config::lookup::ProjectLookup;
+    use ockam_api::identity::EnrollmentTicket;
+    use std::str::FromStr;
+
+    fn create_invitation_with(
+        service_access_details: Option<ServiceAccessDetails>,
+    ) -> InvitationWithAccess {
+        InvitationWithAccess {
+            invitation: ReceivedInvitation {
+                id: "invitation_id".to_string(),
+                expires_at: "2020-09-12T15:07:14.00".to_string(),
grant_role: RoleInShare::Admin, + owner_email: "owner_email".to_string(), + scope: ShareScope::Project, + target_id: "target_id".to_string(), + ignored: false, + }, + service_access_details, + } + } + + fn create_service_access() -> ServiceAccessDetails { + ServiceAccessDetails { + project_identity: "I1234561234561234561234561234561234561234" + .try_into() + .unwrap(), + project_route: "mock_project_route".to_string(), + project_authority_identity: "Iabcdefabcdefabcdefabcdefabcdefabcdefabcd" + .try_into() + .unwrap(), + project_authority_route: "project_authority_route".to_string(), + shared_node_identity: "I12ab34cd56ef12ab34cd56ef12ab34cd56ef12ab" + .try_into() + .unwrap(), + shared_node_route: "remote_service_name".to_string(), + enrollment_ticket: EnrollmentTicket::new( + OneTimeCode::new(), + Some(ProjectLookup { + node_route: None, + id: "project_id".to_string(), + name: "project_name".to_string(), + identity_id: Some( + Identifier::from_str("I1234561234561234561234561234561234561234").unwrap(), + ), + authority: None, + okta: None, + }), + None, + ) + .hex_encoded() + .unwrap(), + } + } + + #[ockam::test(crate = "ockam")] + async fn test_inlet_data_from_invitation(context: &mut Context) -> ockam::Result<()> { + // in this test we want to validate data loading from the accepted invitation + // as well as using the related persistent data + let app_state = AppState::test(context, CliState::test().unwrap()).await; + + let mut invitation = create_invitation_with(None); + + // invitation without service access details should be skipped + app_state + .load_services_from_invitations(vec![invitation.clone()]) + .await; + + let services = app_state.incoming_services().read().await.services.clone(); + assert!(services.is_empty(), "No services should be loaded"); + + invitation.service_access_details = Some(create_service_access()); + + // then we load the invitation with service access details + app_state + .load_services_from_invitations(vec![invitation.clone()]) + .await; + let services = app_state.incoming_services().read().await.services.clone(); + assert_eq!(1, services.len(), "One service should be loaded"); + + let service = &services[0]; + assert_eq!("invitation_id", service.id()); + assert_eq!("remote_service_name", service.name()); + assert!(service.enabled()); + assert!(service.port().is_none()); + assert_eq!( + "project_id", + service.enrollment_ticket().project.as_ref().unwrap().name, + "project name should be overwritten with project id" + ); + assert_eq!( + "forward_to_I12ab34cd56ef12ab34cd56ef12ab34cd56ef12ab", + service.relay_name() + ); + assert_eq!("/project/project_id/service/forward_to_I12ab34cd56ef12ab34cd56ef12ab34cd56ef12ab/secure/api/service/remote_service_name", service.service_route()); + assert_eq!( + "ockam_app_project_id_invitation_id", + service.local_node_name() + ); + + let second_invitation = InvitationWithAccess { + invitation: ReceivedInvitation { + id: "second_invitation_id".to_string(), + expires_at: "2020-09-12T15:07:14.00".to_string(), + grant_role: RoleInShare::Admin, + owner_email: "owner_email".to_string(), + scope: ShareScope::Project, + target_id: "target_id".to_string(), + ignored: false, + }, + service_access_details: invitation.service_access_details.clone(), + }; + + // let's load another invitation, but persistent state for it already exists + app_state + .model_mut(|m| { + m.incoming_services.push(PersistentIncomingServiceState { + invitation_id: "second_invitation_id".to_string(), + enabled: false, + name: 
Some("custom_user_name".to_string()), + }) + }) + .await + .unwrap(); + + app_state + .load_services_from_invitations(vec![invitation.clone(), second_invitation.clone()]) + .await; + let services = app_state.incoming_services().read().await.services.clone(); + assert_eq!(2, services.len(), "Two services should be loaded"); + + let service = &services[1]; + assert_eq!("second_invitation_id", service.id()); + assert!(!service.enabled()); + assert_eq!("custom_user_name", service.name()); + assert_eq!("/project/project_id/service/forward_to_I12ab34cd56ef12ab34cd56ef12ab34cd56ef12ab/secure/api/service/remote_service_name", service.service_route()); + + context.stop().await + } +} diff --git a/implementations/rust/ockam/ockam_app_lib/src/invitations/commands.rs b/implementations/rust/ockam/ockam_app_lib/src/invitations/commands.rs index 56004d7b6db..f2121c0471b 100644 --- a/implementations/rust/ockam/ockam_app_lib/src/invitations/commands.rs +++ b/implementations/rust/ockam/ockam_app_lib/src/invitations/commands.rs @@ -1,28 +1,61 @@ use miette::IntoDiagnostic; -use std::collections::HashMap; use std::net::SocketAddr; use std::str::FromStr; -use std::sync::Arc; -use std::time::Duration; use tracing::{debug, info, trace, warn}; use crate::api::notification::rust::Notification; use crate::api::notification::Kind; -use ockam_api::address::get_free_address; -use ockam_api::cli_state::{CliState, StateDirTrait}; -use ockam_api::cloud::project::Project; -use ockam_api::cloud::share::InvitationListKind; -use ockam_api::cloud::share::{CreateServiceInvitation, InvitationWithAccess, Invitations}; -use ockam_api::nodes::service::portals::Inlets; -use ockam_api::ConnectionStatus; -use ockam_multiaddr::MultiAddr; - -use crate::background_node::BackgroundNodeClient; -use crate::invitations::state::{Inlet, ReceivedInvitationStatus}; -use crate::shared_service::relay::create::relay_name_from_identifier; +use crate::invitations::state::ReceivedInvitationStatus; use crate::state::{AppState, PROJECT_NAME}; +use ockam_api::cloud::share::{CreateServiceInvitation, InvitationListKind, Invitations}; impl AppState { + /// Fetch received, accept and sent invitations from the orchestrator + pub async fn refresh_invitations(&self) -> Result<(), String> { + info!("Refreshing invitations"); + let invitations = { + if !self.is_enrolled().await.unwrap_or(false) { + debug!("not enrolled, skipping invitations refresh"); + return Ok(()); + } + let controller = self.controller().await.map_err(|e| e.to_string())?; + let invitations = controller + .list_invitations(&self.context(), InvitationListKind::All) + .await + .map_err(|e| e.to_string())?; + debug!("Invitations fetched"); + trace!(?invitations); + invitations + }; + + let (changes, accepted_invitations) = { + let invitations_arc = self.invitations(); + let mut guard = invitations_arc.write().await; + let changes = guard.replace_by(invitations); + if changes.changed { + (changes, Some(guard.accepted.invitations.clone())) + } else { + (changes, None) + } + }; + + if changes.changed { + self.publish_state().await; + self.load_services_from_invitations(accepted_invitations.unwrap()) + .await; + self.schedule_inlets_refresh_now(); + if changes.new_received_invitation { + self.notify(Notification { + kind: Kind::Information, + title: "You have pending invitations".to_string(), + message: "".to_string(), + }) + } + } + + Ok(()) + } + pub async fn accept_invitation(&self, id: String) -> Result<(), String> { self.accept_invitation_impl(id) .await @@ -257,421 +290,4 @@ impl AppState { 
self.schedule_invitations_refresh_now(); Ok(()) } - - /// Fetch received, accept and sent invitations from the orchestrator - pub async fn refresh_invitations(&self) -> Result<(), String> { - info!("Refreshing invitations"); - let invitations = { - if !self.is_enrolled().await.unwrap_or(false) { - debug!("not enrolled, skipping invitations refresh"); - return Ok(()); - } - let controller = self.controller().await.map_err(|e| e.to_string())?; - let invitations = controller - .list_invitations(&self.context(), InvitationListKind::All) - .await - .map_err(|e| e.to_string())?; - debug!("Invitations fetched"); - trace!(?invitations); - invitations - }; - - let changed = { - let invitations_arc = self.invitations(); - let mut guard = invitations_arc.write().await; - guard.replace_by(invitations) - }; - - if changed.changed { - self.publish_state().await; - self.schedule_inlets_refresh_now(); - if changed.new_received_invitation { - self.notify(Notification { - kind: Kind::Information, - title: "You have pending invitations".to_string(), - message: "".to_string(), - }) - } - } - - Ok(()) - } - - pub(crate) async fn refresh_inlets(&self) -> crate::Result<()> { - info!("Refreshing inlets"); - - // for each invitation it checks if the relative node is running - // if not, it deletes the node and re-create the inlet - - let invitations_arc = self.invitations(); - let invitations = { - // reduce locking as much as possible to make UI consistently responsive - invitations_arc.read().await.clone() - }; - if invitations.accepted.invitations.is_empty() { - debug!("No accepted invitations, skipping inlets refresh"); - return Ok(()); - } - - let cli_state = self.state().await; - let background_node_client = self.background_node_client().await; - for invitation in invitations.accepted.invitations { - match InletDataFromInvitation::new( - &cli_state, - &invitation, - &invitations.accepted.inlets, - ) { - Ok(inlet_data) => match inlet_data { - Some(inlet_data) => { - let result = self - .refresh_inlet(background_node_client.clone(), inlet_data) - .await; - { - // we want to reduce the scope of the guard as much as possible - let mut guard = invitations_arc.write().await; - match result { - Ok(value) => { - if let Some(inlet) = value { - guard - .accepted - .inlets - .insert(invitation.invitation.id.clone(), inlet); - } else { - // disabled inlet - guard.accepted.inlets.remove(&invitation.invitation.id); - } - } - Err(err) => { - warn!(%err, "Failed to refresh TCP inlet for accepted invitation"); - guard.accepted.inlets.remove(&invitation.invitation.id); - } - } - } - self.publish_state().await; - } - None => { - warn!("Invalid invitation data"); - } - }, - Err(err) => { - warn!(%err, "Failed to parse invitation data"); - } - } - } - - info!("Inlets refreshed"); - Ok(()) - } - - async fn refresh_inlet( - &self, - background_node_client: Arc, - mut inlet_data: InletDataFromInvitation, - ) -> crate::Result> { - let inlet_node_name = &inlet_data.local_node_name; - debug!(node = %inlet_node_name, "Checking node status"); - if !inlet_data.enabled { - debug!(node = %inlet_node_name, "TCP inlet is disabled by the user, deleting the node"); - let _ = self.delete_background_node(inlet_node_name).await; - // we want to keep the entry to store the attribute `enabled = false` - return Inlet::new(inlet_data).map(Some); - } - - if self.state().await.nodes.exists(inlet_node_name) { - let mut inlet_node = self.background_node(inlet_node_name).await?; - inlet_node.set_timeout(Duration::from_secs(5)); - - if let Ok(inlet) = 
inlet_node - .show_inlet(&self.context(), &inlet_data.service_name) - .await? - .success() - { - if inlet.status == ConnectionStatus::Up { - debug!(node = %inlet_node_name, alias = %inlet.alias, "TCP inlet is already up"); - inlet_data.socket_addr = Some(inlet.bind_addr.parse()?); - return Inlet::new(inlet_data).map(Some); - } - } - } - - let socket_addr = self - .create_inlet(background_node_client, &inlet_data) - .await?; - inlet_data.socket_addr = Some(socket_addr); - Inlet::new(inlet_data).map(Some) - } - - /// Create the tcp-inlet for the accepted invitation - /// Returns the inlet SocketAddr - async fn create_inlet( - &self, - background_node_client: Arc, - inlet_data: &InletDataFromInvitation, - ) -> crate::Result { - debug!(service_name = ?inlet_data.service_name, "Creating TCP inlet for accepted invitation"); - let InletDataFromInvitation { - enabled, - local_node_name, - service_name, - service_route, - enrollment_ticket_hex, - socket_addr, - .. - } = inlet_data; - if !enabled { - return Err("TCP inlet is disabled by the user".into()); - } - let from = match socket_addr { - Some(socket_addr) => *socket_addr, - None => get_free_address()?, - }; - if let Some(enrollment_ticket_hex) = enrollment_ticket_hex { - background_node_client - .projects() - .enroll(local_node_name, enrollment_ticket_hex) - .await?; - } - - // Recreate the node using the trust context - debug!(node = %local_node_name, "Creating node to host TCP inlet"); - let _ = self.delete_background_node(local_node_name).await; - background_node_client - .nodes() - .create(local_node_name) - .await?; - tokio::time::sleep(Duration::from_millis(250)).await; - - let mut inlet_node = self.background_node(local_node_name).await?; - inlet_node.set_timeout(Duration::from_secs(5)); - inlet_node - .create_inlet( - &self.context(), - &from.to_string(), - &MultiAddr::from_str(service_route).into_diagnostic()?, - &Some(service_name.to_string()), - &None, - Duration::from_secs(5), - ) - .await?; - Ok(from) - } - - pub(crate) async fn disconnect_tcp_inlet(&self, invitation_id: &str) -> crate::Result<()> { - let inlet = { - let invitations = self.invitations(); - let mut writer = invitations.write().await; - let mut inlet = writer.accepted.inlets.get_mut(invitation_id); - - if let Some(inlet) = inlet.as_mut() { - if !inlet.enabled { - debug!(node = %inlet.node_name, alias = %inlet.alias, "TCP inlet was already disconnected"); - return Ok(()); - } - inlet.disable(); - } - inlet.cloned() - }; - - if let Some(inlet) = inlet { - self.background_node(&inlet.node_name) - .await? 
- .delete_inlet(&self.context(), &inlet.alias) - .await?; - self.publish_state().await; - } - Ok(()) - } - - pub(crate) async fn enable_tcp_inlet(&self, invitation_id: &str) -> crate::Result<()> { - let changed = { - let invitations = self.invitations(); - let mut writer = invitations.write().await; - if let Some(inlet) = writer.accepted.inlets.get(invitation_id).cloned() { - if inlet.enabled { - debug!(node = %inlet.node_name, alias = %inlet.alias, "TCP inlet was already enabled"); - return Ok(()); - } - // refresh will re-create the structure in the meantime it'll be seen in - // 'connecting' status - writer.accepted.inlets.remove(invitation_id); - info!(node = %inlet.node_name, alias = %inlet.alias, "Enabled TCP inlet"); - true - } else { - false - } - }; - - if changed { - self.publish_state().await; - } - Ok(()) - } -} - -#[derive(Debug)] -pub(crate) struct InletDataFromInvitation { - pub enabled: bool, - pub local_node_name: String, - pub service_name: String, - pub service_route: String, - pub enrollment_ticket_hex: Option, - pub socket_addr: Option, -} - -impl InletDataFromInvitation { - pub fn new( - cli_state: &CliState, - invitation: &InvitationWithAccess, - inlets: &HashMap, - ) -> crate::Result> { - match &invitation.service_access_details { - Some(access_details) => { - let service_name = access_details.service_name()?; - let mut enrollment_ticket = access_details.enrollment_ticket()?; - // The enrollment ticket contains the project data. - // We need to replace the project name on the enrollment ticket with the project id, - // so that, when using the enrollment ticket, there are no conflicts with the default project. - // The node created when setting up the TCP inlet is meant to only serve that TCP inlet and - // only has to resolve the `/project/{id}` project to create the needed secure-channel. - if let Some(project) = enrollment_ticket.project.as_mut() { - project.name = project.id.clone(); - } - let enrollment_ticket_hex = if invitation.invitation.is_expired()? { - None - } else { - Some(enrollment_ticket.hex_encoded()?) - }; - - if let Some(project) = enrollment_ticket.project { - // At this point, the project name will be the project id. 
- let project = cli_state - .projects - .overwrite(project.name.clone(), Project::from(project.clone()))?; - assert_eq!( - project.name(), - project.id(), - "Project name should be the project id" - ); - let project_id = project.id(); - let relay_name = - relay_name_from_identifier(&access_details.shared_node_identity); - let service_route = format!( - "/project/{project_id}/service/{relay_name}/secure/api/service/{service_name}" - ); - let local_node_name = format!("ockam_app_{project_id}_{service_name}"); - - let inlet = inlets.get(&invitation.invitation.id); - let enabled = inlet.map(|i| i.enabled).unwrap_or(true); - let socket_addr = inlet.map(|i| i.socket_addr); - - Ok(Some(Self { - enabled, - local_node_name, - service_name, - service_route, - enrollment_ticket_hex, - socket_addr, - })) - } else { - warn!(?invitation, "No project data found in enrollment ticket"); - Ok(None) - } - } - None => { - warn!( - ?invitation, - "No service details found in accepted invitation" - ); - Ok(None) - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use ockam::identity::{Identifier, OneTimeCode}; - use ockam_api::cloud::share::{ - ReceivedInvitation, RoleInShare, ServiceAccessDetails, ShareScope, - }; - use ockam_api::config::lookup::ProjectLookup; - use ockam_api::identity::EnrollmentTicket; - - #[test] - fn test_inlet_data_from_invitation() { - let cli_state = CliState::test().unwrap(); - let mut inlets = HashMap::new(); - let mut invitation = InvitationWithAccess { - invitation: ReceivedInvitation { - id: "invitation_id".to_string(), - expires_at: "2020-09-12T15:07:14.00".to_string(), - grant_role: RoleInShare::Admin, - owner_email: "owner_email".to_string(), - scope: ShareScope::Project, - target_id: "target_id".to_string(), - ignored: false, - }, - service_access_details: None, - }; - - // InletDataFromInvitation will be none because `service_access_details` is none - assert!( - InletDataFromInvitation::new(&cli_state, &invitation, &inlets) - .unwrap() - .is_none() - ); - - invitation.service_access_details = Some(ServiceAccessDetails { - project_identity: "I1234561234561234561234561234561234561234" - .try_into() - .unwrap(), - project_route: "project_route".to_string(), - project_authority_identity: "Iabcdefabcdefabcdefabcdefabcdefabcdefabcd" - .try_into() - .unwrap(), - project_authority_route: "project_authority_route".to_string(), - shared_node_identity: "I12ab34cd56ef12ab34cd56ef12ab34cd56ef12ab" - .try_into() - .unwrap(), - shared_node_route: "shared_node_route".to_string(), - enrollment_ticket: EnrollmentTicket::new( - OneTimeCode::new(), - Some(ProjectLookup { - node_route: None, - id: "project_identity".to_string(), - name: "project_name".to_string(), - identity_id: Some( - Identifier::from_str("I1234561234561234561234561234561234561234").unwrap(), - ), - authority: None, - okta: None, - }), - None, - ) - .hex_encoded() - .unwrap(), - }); - - // Validate the inlet data, with no prior inlet data - let inlet_data = InletDataFromInvitation::new(&cli_state, &invitation, &inlets) - .unwrap() - .unwrap(); - assert!(inlet_data.socket_addr.is_none()); - - // Validate the inlet data, with prior inlet data - inlets.insert( - "invitation_id".to_string(), - Inlet { - node_name: "local_node_name".to_string(), - alias: "alias".to_string(), - socket_addr: "127.0.0.1:1000".parse().unwrap(), - enabled: true, - }, - ); - let inlet_data = InletDataFromInvitation::new(&cli_state, &invitation, &inlets) - .unwrap() - .unwrap(); - assert!(inlet_data.socket_addr.is_some()); - } } diff --git 
a/implementations/rust/ockam/ockam_app_lib/src/invitations/state.rs b/implementations/rust/ockam/ockam_app_lib/src/invitations/state.rs index e1189a2381c..0fa93edbfcf 100644 --- a/implementations/rust/ockam/ockam_app_lib/src/invitations/state.rs +++ b/implementations/rust/ockam/ockam_app_lib/src/invitations/state.rs @@ -1,6 +1,3 @@ -use std::collections::HashMap; -use std::net::SocketAddr; - use serde::{Deserialize, Serialize}; use tracing::debug; @@ -8,9 +5,6 @@ use ockam_api::cloud::share::{ InvitationList, InvitationWithAccess, ReceivedInvitation, SentInvitation, }; -use crate::invitations::commands::InletDataFromInvitation; -use crate::{Error, Result}; - #[derive(Clone, Debug, Default, Deserialize, Serialize)] pub struct InvitationState { #[serde(default)] @@ -87,37 +81,6 @@ pub enum ReceivedInvitationStatus { pub struct AcceptedInvitations { #[serde(default)] pub(crate) invitations: Vec, - - /// Inlets for accepted invitations, keyed by invitation id. - #[serde(default)] - pub(crate) inlets: HashMap, -} - -#[derive(Clone, Debug, Deserialize, Serialize)] -pub(crate) struct Inlet { - pub(crate) node_name: String, - pub(crate) alias: String, - pub(crate) socket_addr: SocketAddr, - pub(crate) enabled: bool, -} - -impl Inlet { - pub(crate) fn new(data: InletDataFromInvitation) -> Result { - let socket_addr = match data.socket_addr { - Some(addr) => addr, - None => return Err(Error::App("Socket address should be set".to_string())), - }; - Ok(Self { - node_name: data.local_node_name, - alias: data.service_name, - socket_addr, - enabled: data.enabled, - }) - } - - pub(crate) fn disable(&mut self) { - self.enabled = false; - } } #[cfg(test)] diff --git a/implementations/rust/ockam/ockam_app_lib/src/lib.rs b/implementations/rust/ockam/ockam_app_lib/src/lib.rs index 9f9aaa73a5a..9f200662556 100644 --- a/implementations/rust/ockam/ockam_app_lib/src/lib.rs +++ b/implementations/rust/ockam/ockam_app_lib/src/lib.rs @@ -11,6 +11,7 @@ mod background_node; mod cli; mod enroll; mod error; +mod incoming_services; mod invitations; mod log; mod projects; diff --git a/implementations/rust/ockam/ockam_app_lib/src/shared_service/relay/create.rs b/implementations/rust/ockam/ockam_app_lib/src/shared_service/relay/create.rs index 612c68dbc39..797faafbeda 100644 --- a/implementations/rust/ockam/ockam_app_lib/src/shared_service/relay/create.rs +++ b/implementations/rust/ockam/ockam_app_lib/src/shared_service/relay/create.rs @@ -2,7 +2,6 @@ use crate::api::state::OrchestratorStatus; use crate::state::AppState; use crate::Result; use miette::IntoDiagnostic; -use ockam::identity::Identifier; use ockam::Context; use ockam_api::cli_state::{CliState, StateDirTrait}; use ockam_api::nodes::models::relay::RelayInfo; @@ -130,11 +129,6 @@ fn relay_name(cli_state: &CliState) -> ockam::Result { Ok(format!("forward_to_{bare_relay_name}")) } -pub(crate) fn relay_name_from_identifier(identifier: &Identifier) -> String { - let bare_relay_name = identifier.to_string(); - format!("forward_to_{bare_relay_name}") -} - fn bare_relay_name(cli_state: &CliState) -> ockam::Result { Ok(cli_state .identities diff --git a/implementations/rust/ockam/ockam_app_lib/src/state/mod.rs b/implementations/rust/ockam/ockam_app_lib/src/state/mod.rs index c390d4750fb..7104553d81d 100644 --- a/implementations/rust/ockam/ockam_app_lib/src/state/mod.rs +++ b/implementations/rust/ockam/ockam_app_lib/src/state/mod.rs @@ -38,6 +38,7 @@ use ockam_api::trust_context::TrustContextConfigBuilder; use ockam_multiaddr::MultiAddr; use 
crate::api::state::OrchestratorStatus;
+use crate::incoming_services::IncomingServicesState;
 use crate::scheduler::Scheduler;
 use crate::state::tasks::{
     RefreshInletsTask, RefreshInvitationsTask, RefreshProjectsTask, RefreshRelayTask,
@@ -66,6 +67,7 @@ pub struct AppState {
     background_node_client: Arc<RwLock<Arc<dyn BackgroundNodeClient>>>,
     projects: Arc<RwLock<Vec<Project>>>,
     invitations: Arc<RwLock<InvitationState>>,
+    incoming_services: Arc<RwLock<IncomingServicesState>>,
     application_state_callback: Option<ApplicationStateCallback>,
     notification_callback: Option<NotificationCallback>,
     node_manager: Arc<RwLock<Arc<InMemoryNode>>>,
@@ -77,63 +79,96 @@ pub struct AppState {
     pub(crate) tracing_guard: Arc<OnceLock<WorkerGuard>>,
 }
 
+async fn create_node_manager(ctx: Arc<Context>, cli_state: &CliState) -> Arc<InMemoryNode> {
+    match make_node_manager(ctx.clone(), cli_state).await {
+        Ok(w) => w,
+        Err(e) => {
+            error!(%e, "Cannot load the model state");
+            panic!("Cannot load the model state: {e:?}")
+        }
+    }
+}
+
 impl AppState {
-    /// Create a new AppState, if it fails you can assume it's because the state cannot be loaded
+    /// Creates a new AppState; if it fails, you can assume it's because the state cannot be loaded.
+    /// The callbacks are used to push application state and notifications to the UI.
     pub fn new(
-        application_state_callback: Option<ApplicationStateCallback>,
-        notification_callback: Option<NotificationCallback>,
+        application_state_callback: ApplicationStateCallback,
+        notification_callback: NotificationCallback,
     ) -> Result<AppState> {
         let cli_state = CliState::initialize()?;
         let (context, mut executor) = NodeBuilder::new().no_logging().build();
         let context = Arc::new(context);
-        let runtime = context.runtime().clone();
-
-        let future = {
-            let runtime = runtime.clone();
-            async move {
-                // start the router, it is needed for the node manager creation
-                runtime.spawn(async move {
-                    let result = executor.start_router().await;
-                    if let Err(e) = result {
-                        error!(%e, "Failed to start the router")
-                    }
-                });
-                // create the application state and its dependencies
-                let node_manager = create_node_manager(context.clone(), &cli_state).await;
-                let model_state_repository = create_model_state_repository(&cli_state).await;
-                let model_state = model_state_repository
-                    .load()
-                    .await
-                    .expect("Failed to load the model state")
-                    .unwrap_or(ModelState::default());
-
-                info!("AppState initialized");
-                AppState {
-                    context,
-                    application_state_callback,
-                    notification_callback,
-                    state: Arc::new(RwLock::new(cli_state)),
-                    orchestrator_status: Arc::new(Mutex::new(Default::default())),
-                    node_manager: Arc::new(RwLock::new(node_manager)),
-                    model_state: Arc::new(RwLock::new(model_state)),
-                    model_state_repository: Arc::new(RwLock::new(model_state_repository)),
-                    background_node_client: Arc::new(RwLock::new(Arc::new(Cli::new()))),
-                    projects: Arc::new(Default::default()),
-                    invitations: Arc::new(RwLock::new(InvitationState::default())),
-                    refresh_project_scheduler: Arc::new(Default::default()),
-                    refresh_invitations_scheduler: Arc::new(Default::default()),
-                    refresh_inlets_scheduler: Arc::new(Default::default()),
-                    refresh_relay_scheduler: Arc::new(Default::default()),
-                    last_published_snapshot: Arc::new(Mutex::new(None)),
-                    tracing_guard: Arc::new(Default::default()),
-                }
-            }
+        // start the router, it is needed for the node manager creation
+        context.runtime().spawn(async move {
+            let result = executor.start_router().await;
+            if let Err(e) = result {
+                error!(%e, "Failed to start the router")
+            }
+        });
+
+        let runtime = context.runtime().clone();
+        let future = async {
+            Self::make(
+                context,
+                Some(application_state_callback),
+                Some(notification_callback),
+                cli_state,
+            )
+            .await
         };
 
         Ok(runtime.block_on(future))
     }
 
+    /// Creates a new AppState for testing purposes
+    #[cfg(test)]
+    pub async fn test(context: &Context, cli_state: CliState) -> AppState {
+        let context = ockam_core::AsyncTryClone::async_try_clone(context)
+            .await
+            .unwrap();
+        Self::make(Arc::new(context), None, None, cli_state).await
+    }
+
+    async fn make(
+        context: Arc<Context>,
+        application_state_callback: Option<ApplicationStateCallback>,
+        notification_callback: Option<NotificationCallback>,
+        cli_state: CliState,
+    ) -> AppState {
+        // create the application state and its dependencies
+        let node_manager = create_node_manager(context.clone(), &cli_state).await;
+        let model_state_repository = create_model_state_repository(&cli_state).await;
+        let model_state = model_state_repository
+            .load()
+            .await
+            .expect("Failed to load the model state")
+            .unwrap_or(ModelState::default());
+
+        info!("AppState initialized");
+        AppState {
+            context,
+            application_state_callback,
+            notification_callback,
+            state: Arc::new(RwLock::new(cli_state)),
+            orchestrator_status: Arc::new(Mutex::new(Default::default())),
+            node_manager: Arc::new(RwLock::new(node_manager)),
+            model_state: Arc::new(RwLock::new(model_state)),
+            model_state_repository: Arc::new(RwLock::new(model_state_repository)),
+            background_node_client: Arc::new(RwLock::new(Arc::new(Cli::new()))),
+            projects: Arc::new(Default::default()),
+            invitations: Arc::new(RwLock::new(InvitationState::default())),
+            incoming_services: Arc::new(RwLock::new(IncomingServicesState::default())),
+            refresh_project_scheduler: Arc::new(Default::default()),
+            refresh_invitations_scheduler: Arc::new(Default::default()),
+            refresh_inlets_scheduler: Arc::new(Default::default()),
+            refresh_relay_scheduler: Arc::new(Default::default()),
+            last_published_snapshot: Arc::new(Mutex::new(None)),
+            tracing_guard: Arc::new(Default::default()),
+        }
+    }
+
     /// Load a previously persisted ModelState and start refreshing schedule
     pub async fn load_model_state(&'static self) {
         self.restore_tcp_outlets().await;
@@ -195,16 +230,15 @@ impl AppState {
         let this = self.clone();
         runtime.spawn(async move {
             let inlets: Vec<String> = {
-                let invitation_state = this.invitations().read().await.clone();
-                invitation_state
-                    .accepted
-                    .inlets
-                    .values()
-                    .map(|inlet| inlet.node_name.clone())
+                let services = this.incoming_services().read().await.clone();
+                services
+                    .services
+                    .iter()
+                    .map(|inlet| inlet.local_node_name())
                     .collect()
             };
 
-            for node_name in inlets {
+            for node_name in inlets.into_iter() {
                 let _ = this.delete_background_node(&node_name).await;
             }
 
@@ -318,6 +352,11 @@ impl AppState {
         self.invitations.clone()
     }
 
+    /// Returns the status of the incoming services
+    pub fn incoming_services(&self) -> Arc<RwLock<IncomingServicesState>> {
+        self.incoming_services.clone()
+    }
+
     /// Return the application cli state
     /// This can be used to manage the on-disk state for projects, identities, vaults, etc...
pub async fn state(&self) -> CliState { @@ -473,6 +512,7 @@ impl AppState { let mut groups: Vec; let mut sent_invitations: Vec; let invitation_state = { self.invitations().read().await.clone() }; + let incoming_services_state = { self.incoming_services().read().await.clone() }; // we want to sort everything to avoid having to deal with ordering in the UI if enrolled { @@ -581,33 +621,17 @@ impl AppState { invitations }, incoming_services: { - let mut incoming_services: Vec = invitation_state - .accepted - .invitations + let mut incoming_services: Vec = incoming_services_state + .services .iter() - .filter(|invitation| { - invitation.invitation.owner_email == email - && invitation.service_access_details.is_some() - }) - .map(|invitation| { - let access_details = - invitation.service_access_details.as_ref().unwrap(); - let inlet = invitation_state - .accepted - .inlets - .get(&invitation.invitation.id); - - Service { - id: invitation.invitation.id.clone(), - source_name: access_details - .service_name() - .unwrap_or("unknown".to_string()), - address: inlet.map(|inlet| inlet.socket_addr.ip().to_string()), - port: inlet.map(|inlet| inlet.socket_addr.port()), - scheme: None, - available: inlet.is_some(), - enabled: inlet.map(|inlet| inlet.enabled).unwrap_or(true), - } + .map(|service| Service { + id: service.id().to_string(), + source_name: service.name().to_string(), + address: service.address().map(|addr| addr.ip().to_string()), + port: service.port(), + scheme: None, + available: service.port().is_some(), + enabled: service.enabled(), }) .collect(); @@ -647,16 +671,6 @@ impl AppState { } } -async fn create_node_manager(ctx: Arc, cli_state: &CliState) -> Arc { - match make_node_manager(ctx.clone(), cli_state).await { - Ok(w) => w, - Err(e) => { - error!(%e, "Cannot load the model state"); - panic!("Cannot load the model state: {e:?}") - } - } -} - /// Make a node manager with a default node called "default" pub(crate) async fn make_node_manager( ctx: Arc, diff --git a/implementations/rust/ockam/ockam_app_lib/src/state/model.rs b/implementations/rust/ockam/ockam_app_lib/src/state/model.rs index 1f2466b3553..422f744a57f 100644 --- a/implementations/rust/ockam/ockam_app_lib/src/state/model.rs +++ b/implementations/rust/ockam/ockam_app_lib/src/state/model.rs @@ -1,3 +1,4 @@ +use crate::incoming_services::PersistentIncomingServiceState; use ockam_api::nodes::models::portal::OutletStatus; use serde::{Deserialize, Serialize}; @@ -6,16 +7,25 @@ use serde::{Deserialize, Serialize}; pub struct ModelState { #[serde(default = "Vec::new")] pub(crate) tcp_outlets: Vec, + + #[serde(default = "Vec::new")] + pub(crate) incoming_services: Vec, } impl Default for ModelState { fn default() -> Self { - ModelState::new(vec![]) + ModelState::new(vec![], vec![]) } } impl ModelState { - pub fn new(tcp_outlets: Vec) -> Self { - Self { tcp_outlets } + pub fn new( + tcp_outlets: Vec, + incoming_services: Vec, + ) -> Self { + Self { + tcp_outlets, + incoming_services, + } } } diff --git a/implementations/swift/ockam/ockam_app/Ockam.xcodeproj/project.pbxproj b/implementations/swift/ockam/ockam_app/Ockam.xcodeproj/project.pbxproj index efeb04783cf..cd51da54a41 100644 --- a/implementations/swift/ockam/ockam_app/Ockam.xcodeproj/project.pbxproj +++ b/implementations/swift/ockam/ockam_app/Ockam.xcodeproj/project.pbxproj @@ -21,6 +21,7 @@ 783B62722AC432B700880261 /* Bridge.swift in Sources */ = {isa = PBXBuildFile; fileRef = 783B62712AC432B700880261 /* Bridge.swift */; }; 789FA6462AE142280009FF7F /* ExportOptions.plist in 
Resources */ = {isa = PBXBuildFile; fileRef = 789FA6452AE142280009FF7F /* ExportOptions.plist */; }; 78BD349D2AFA621500F09058 /* BrokenStateView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 78BD349C2AFA621500F09058 /* BrokenStateView.swift */; }; + 78BD34A12AFCFDF100F09058 /* RemoteServiceView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 78BD34A02AFCFDF100F09058 /* RemoteServiceView.swift */; }; 78D520832ADD5CAE00F36B64 /* Helpers.swift in Sources */ = {isa = PBXBuildFile; fileRef = 78D520822ADD5CAE00F36B64 /* Helpers.swift */; }; 78ED0DA12AD4354400574AE9 /* CreateService.swift in Sources */ = {isa = PBXBuildFile; fileRef = 78ED0DA02AD4354400574AE9 /* CreateService.swift */; }; 78ED0DA32AD4501200574AE9 /* ShareService.swift in Sources */ = {isa = PBXBuildFile; fileRef = 78ED0DA22AD4501200574AE9 /* ShareService.swift */; }; @@ -46,6 +47,7 @@ 785D8D372AD5369100DF5004 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist; path = Info.plist; sourceTree = ""; }; 789FA6452AE142280009FF7F /* ExportOptions.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = ExportOptions.plist; sourceTree = ""; }; 78BD349C2AFA621500F09058 /* BrokenStateView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = BrokenStateView.swift; sourceTree = ""; }; + 78BD34A02AFCFDF100F09058 /* RemoteServiceView.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = RemoteServiceView.swift; sourceTree = ""; }; 78D520822ADD5CAE00F36B64 /* Helpers.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Helpers.swift; sourceTree = ""; }; 78ED0DA02AD4354400574AE9 /* CreateService.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = CreateService.swift; sourceTree = ""; }; 78ED0DA22AD4501200574AE9 /* ShareService.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ShareService.swift; sourceTree = ""; }; @@ -87,6 +89,7 @@ 782D5B472AC1D3D700D1B27F /* Ockam */ = { isa = PBXGroup; children = ( + 78BD34A02AFCFDF100F09058 /* RemoteServiceView.swift */, 789FA6452AE142280009FF7F /* ExportOptions.plist */, 785D8D372AD5369100DF5004 /* Info.plist */, 782D5B482AC1D3D700D1B27F /* OckamApp.swift */, @@ -201,6 +204,7 @@ buildActionMask = 2147483647; files = ( 7827D5092AD93F1700F7A20F /* ClickableMenuEntry.swift in Sources */, + 78BD34A12AFCFDF100F09058 /* RemoteServiceView.swift in Sources */, 782D5B4B2AC1D3D700D1B27F /* MainView.swift in Sources */, 78ED0DA12AD4354400574AE9 /* CreateService.swift in Sources */, 78D520832ADD5CAE00F36B64 /* Helpers.swift in Sources */, diff --git a/implementations/swift/ockam/ockam_app/Ockam/RemoteServiceView.swift b/implementations/swift/ockam/ockam_app/Ockam/RemoteServiceView.swift new file mode 100644 index 00000000000..eabdc762a32 --- /dev/null +++ b/implementations/swift/ockam/ockam_app/Ockam/RemoteServiceView.swift @@ -0,0 +1,126 @@ +import SwiftUI + +struct RemoteServiceView: View { + @State private var isHovered = false + @State private var isOpen = false + @ObservedObject var service: Service + + @Environment(\.presentationMode) var presentationMode: Binding + func closeWindow() { + self.presentationMode.wrappedValue.dismiss() + } + + var body: some View { + VStack(alignment: .leading) { + HStack { + Image(systemName: "circle") + .foregroundColor( + service.enabled ? (service.available ? 
.green : .red) : .orange + ) + .frame(maxWidth: 16, maxHeight: 16) + + VStack(alignment: .leading) { + Text(service.sourceName).font(.title3).lineLimit(1) + if !service.enabled { + Text(verbatim: "Disconnected").font(.caption) + } else { + if service.available { + let address = + if let scheme = service.scheme { + scheme + "://" + service.address.unsafelyUnwrapped + ":" + + String(service.port.unsafelyUnwrapped) + } else { + service.address.unsafelyUnwrapped + ":" + + String(service.port.unsafelyUnwrapped) + } + Text(verbatim: address).font(.caption).lineLimit(1) + } else { + Text(verbatim: "Connecting...").font(.caption) + } + } + } + Spacer() + Image(systemName: "chevron.right") + .frame(width: 32, height: 32) + .rotationEffect( + isOpen ? Angle.degrees(90.0) : Angle.degrees(0), anchor: .center) + } + .padding(3) + .contentShape(Rectangle()) + .onTapGesture { + withAnimation { + isOpen = !isOpen + } + } + .onHover { hover in + isHovered = hover + } + .background(isHovered ? Color.gray.opacity(0.25) : Color.clear) + .cornerRadius(4) + + if isOpen { + VStack(spacing: 0) { + if service.available { + if service.enabled { + let address = + service.address.unsafelyUnwrapped + ":" + + String(service.port.unsafelyUnwrapped) + if let scheme = service.scheme { + let url = + scheme + "://" + service.address.unsafelyUnwrapped + ":" + + String(service.port.unsafelyUnwrapped) + ClickableMenuEntry( + text: "Open " + url, + action: { + if let url = URL(string: url) { + NSWorkspace.shared.open(url) + } + }) + } + ClickableMenuEntry( + text: "Copy " + address, clicked: "Copied!", + action: { + copyToClipboard(address) + self.closeWindow() + }) + } + } + + if service.enabled { + ClickableMenuEntry( + text: "Disconnect", + action: { + disable_accepted_service(service.id) + }) + } else { + ClickableMenuEntry( + text: "Connect", + action: { + enable_accepted_service(service.id) + }) + } + } + } + } + } +} + + +struct RemoteServiceView_Previews: PreviewProvider { + @State static var state = swift_demo_application_state() + + static var previews: some View { + VStack { + ForEach(state.groups[0].incomingServices) { service in + RemoteServiceView(service: service) + } + ForEach(state.groups[1].incomingServices) { service in + RemoteServiceView(service: service) + } + ForEach(state.groups[2].incomingServices) { service in + RemoteServiceView(service: service) + } + } + .frame(width: 300, height: 600) + } +} diff --git a/implementations/swift/ockam/ockam_app/Ockam/Services.swift b/implementations/swift/ockam/ockam_app/Ockam/Services.swift index b3d81985f4b..075da592eed 100644 --- a/implementations/swift/ockam/ockam_app/Ockam/Services.swift +++ b/implementations/swift/ockam/ockam_app/Ockam/Services.swift @@ -85,108 +85,6 @@ struct ServiceGroupButton: View { } } -struct RemoteServiceView: View { - @State private var isHovered = false - @State private var isOpen = false - @ObservedObject var service: Service - - @Environment(\.presentationMode) var presentationMode: Binding - func closeWindow() { - self.presentationMode.wrappedValue.dismiss() - } - - var body: some View { - VStack(alignment: .leading) { - HStack { - Image(systemName: "circle") - .foregroundColor( - service.available ? (service.enabled ? 
.green : .orange) : .red - ) - .frame(maxWidth: 16, maxHeight: 16) - - VStack(alignment: .leading) { - Text(service.sourceName).font(.title3).lineLimit(1) - if service.available { - let address = - if let scheme = service.scheme { - scheme + "://" + service.address.unsafelyUnwrapped + ":" - + String(service.port.unsafelyUnwrapped) - } else { - service.address.unsafelyUnwrapped + ":" - + String(service.port.unsafelyUnwrapped) - } - Text(verbatim: address).font(.caption).lineLimit(1) - } else { - Text(verbatim: "Connecting...").font(.caption) - } - } - Spacer() - if service.available { - Image(systemName: "chevron.right") - .frame(width: 32, height: 32) - .rotationEffect( - isOpen ? Angle.degrees(90.0) : Angle.degrees(0), anchor: .center) - } - } - .padding(3) - .contentShape(Rectangle()) - .onTapGesture { - if service.available { - withAnimation { - isOpen = !isOpen - } - } - } - .onHover { hover in - isHovered = hover - } - .background(isHovered ? Color.gray.opacity(0.25) : Color.clear) - .cornerRadius(4) - - if isOpen { - VStack(spacing: 0) { - if service.available { - if service.enabled { - let address = - service.address.unsafelyUnwrapped + ":" - + String(service.port.unsafelyUnwrapped) - if let scheme = service.scheme { - let url = - scheme + "://" + service.address.unsafelyUnwrapped + ":" - + String(service.port.unsafelyUnwrapped) - ClickableMenuEntry( - text: "Open " + url, - action: { - if let url = URL(string: url) { - NSWorkspace.shared.open(url) - } - }) - } - ClickableMenuEntry( - text: "Copy " + address, clicked: "Copied!", - action: { - copyToClipboard(address) - self.closeWindow() - }) - ClickableMenuEntry( - text: "Disconnect", - action: { - disable_accepted_service(service.id) - }) - } else { - ClickableMenuEntry( - text: "Connect", - action: { - enable_accepted_service(service.id) - }) - } - } - } - } - } - } -} - struct LocalServiceView: View { @Environment(\.openWindow) private var openWindow