From db0ac0fcf223c09e807af7d044ea0e1a2aeef3b0 Mon Sep 17 00:00:00 2001 From: Paulo Cabral Date: Tue, 30 Jan 2024 15:54:52 -0300 Subject: [PATCH 01/21] Avoid blocking commit in the last fix batch --- lib/dal/src/job/definition/fix.rs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/lib/dal/src/job/definition/fix.rs b/lib/dal/src/job/definition/fix.rs index b98994a74a..dbe4e5c692 100644 --- a/lib/dal/src/job/definition/fix.rs +++ b/lib/dal/src/job/definition/fix.rs @@ -144,6 +144,7 @@ impl JobConsumer for FixesJob { fix_items.push(item.clone()); } } + let should_blocking_commit = fixes.len() != fix_items.len(); debug!( ?fixes, @@ -190,6 +191,7 @@ impl JobConsumer for FixesJob { self.batch_id, fix_item, Span::current(), + should_blocking_commit, )) .await; (id, res) @@ -329,6 +331,7 @@ async fn fix_task( batch_id: FixBatchId, fix_item: FixItem, parent_span: Span, + should_blocking_commit: bool, ) -> JobConsumerResult<(Fix, Vec)> { let deleted_ctx = &ctx.clone_with_delete_visibility(); // Get the workflow for the action we need to run. @@ -386,7 +389,14 @@ async fn fix_task( // consecutive fixes that depend on the /root/resource from the previous fix. // `blocking_commit()` will wait for any jobs that have ben created through // `enqueue_job(...)` to finish before moving on. 
- ctx.blocking_commit().await?; + if should_blocking_commit { + ctx.blocking_commit().await?; + } else { + if ctx.blocking() { + info!("Blocked on commit that should not block of fix definition"); + } + ctx.commit().await?; + } if matches!(completion_status, FixCompletionStatus::Success) { if let Err(err) = component.act(&ctx, ActionKind::Refresh).await { @@ -494,7 +504,7 @@ async fn process_failed_fix_inner( } } - ctx.blocking_commit().await?; + ctx.commit().await?; } Ok(()) From 4402cb428f3d10afa8cf49747c852fa6132854c8 Mon Sep 17 00:00:00 2001 From: stack72 Date: Tue, 27 Feb 2024 23:29:14 +0000 Subject: [PATCH 02/21] feat(lang-js): Export YAML load API in sandbox We need to expose the ability to load strings as YAML for some of the modelling --- bin/lang-js/src/sandbox.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/lang-js/src/sandbox.ts b/bin/lang-js/src/sandbox.ts index 678e54bf85..f7cc5622ed 100644 --- a/bin/lang-js/src/sandbox.ts +++ b/bin/lang-js/src/sandbox.ts @@ -28,7 +28,7 @@ function commonSandbox(executionId: string): Sandbox { zlib, fetch, siExec: makeExec(executionId), - YAML: { stringify: yaml.dump }, + YAML: { stringify: yaml.dump, parse: yaml.load }, os, fs, path, From 84f00c43799b3ee133f8cd01a937f1fae74bc110 Mon Sep 17 00:00:00 2001 From: John Watson Date: Wed, 28 Feb 2024 10:43:48 +0000 Subject: [PATCH 03/21] fix(cypress): amend create component to be working with new UI --- app/web/.env | 4 ++-- .../e2e/modelling-functionality/create-component.cy.ts | 2 +- .../e2e/modelling-functionality/delete-component.cy.ts | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/app/web/.env b/app/web/.env index 97a71669f7..efa48ace33 100644 --- a/app/web/.env +++ b/app/web/.env @@ -16,8 +16,8 @@ VITE_AUTH0_DOMAIN=systeminit.auth0.com # Remote Executing / E2E Specifities for Testing Production, local simply change the values to something like http://locahost:8080 & the relevant component IDs # VITE_SI_WORKSPACE_URL = 
https://app.systeminit.com/ # Production URL # VITE_SI_WORKSPACE_ID = 01HPMKZZ0DF54B12FNBF6Z7704 # Production Workspace URL Used for Synthetics -# VITE_SI_PROPAGATION_COMPONENT_A = c_01HPMM4A95M2E3V6STMZZCZJRZ # Production: Used for E2E Value Propagation Test [output socket i.e. from] -# VITE_SI_PROPAGATION_COMPONENT_B = c_01HPMM4BC682PFCYD7GE3X3AQW # Production: Used for E2E Value Propagation Test [input socket i.e. to] +# VITE_SI_PROPAGATION_COMPONENT_A = c_01HQQP1XXK144KDJ7SPTCGRZGV # Production: Used for E2E Value Propagation Test [output socket i.e. from] +# VITE_SI_PROPAGATION_COMPONENT_B = c_01HQQP2MJ767AR9FPKP051Y9HS # Production: Used for E2E Value Propagation Test [input socket i.e. to] # VITE_SI_CYPRESS_MULTIPLIER=1 # How many times to run each test in cypress, only changes modelling tests diff --git a/app/web/cypress/e2e/modelling-functionality/create-component.cy.ts b/app/web/cypress/e2e/modelling-functionality/create-component.cy.ts index 368370a676..eaf4861617 100644 --- a/app/web/cypress/e2e/modelling-functionality/create-component.cy.ts +++ b/app/web/cypress/e2e/modelling-functionality/create-component.cy.ts @@ -23,7 +23,7 @@ Cypress._.times(import.meta.env.VITE_SI_CYPRESS_MULTIPLIER ? 
import.meta.env.VIT cy.url().should('not.include', 'head', { timeout: 10000 }); // Find the AWS Credential - cy.get('[data-cy="asset_card', { timeout: 30000 }).contains('AWS Credential').should('be.visible').as('awsCred') + cy.get('div[class="tree-node"]', { timeout: 30000 }).contains('AWS Credential').should('be.visible').as('awsCred') // Find the canvas to get a location to drag to cy.get('canvas').first().as('konvaStage'); diff --git a/app/web/cypress/e2e/modelling-functionality/delete-component.cy.ts b/app/web/cypress/e2e/modelling-functionality/delete-component.cy.ts index 571d794512..bf8ae0348d 100644 --- a/app/web/cypress/e2e/modelling-functionality/delete-component.cy.ts +++ b/app/web/cypress/e2e/modelling-functionality/delete-component.cy.ts @@ -22,7 +22,7 @@ Cypress._.times(import.meta.env.VITE_SI_CYPRESS_MULTIPLIER ? import.meta.env.VIT cy.url().should('not.include', 'head', { timeout: 10000 }); // Find the AWS Credential - cy.get('[data-cy="asset_card', { timeout: 30000 }).contains('AWS Credential', { timeout: 30000 }).should('be.visible').as('awsCred') + cy.get('div[class="tree-node"]', { timeout: 30000 }).contains('AWS Credential').should('be.visible').as('awsCred') // Find the canvas to get a location to drag to cy.get('canvas').first().as('konvaStage'); From 4de21a21778f4f714a84d64105bf68452b3e32af Mon Sep 17 00:00:00 2001 From: John Watson Date: Wed, 28 Feb 2024 12:00:46 +0000 Subject: [PATCH 04/21] feat(cypress): increase frequency, add retry, add slackhook for failure --- .github/workflows/e2e-validation.yml | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/.github/workflows/e2e-validation.yml b/.github/workflows/e2e-validation.yml index 0c787ecbdc..09b43885ca 100644 --- a/.github/workflows/e2e-validation.yml +++ b/.github/workflows/e2e-validation.yml @@ -3,7 +3,7 @@ name: Cypress E2E Tests on: workflow_dispatch: schedule: - - cron: '*/15 * * * *' # Runs every 5 minutes + - cron: '*/5 * * * *' # Runs every 
5 minutes jobs: cypress-tests: @@ -43,12 +43,17 @@ jobs: with: version: 8.10.5 - - name: Run Cypress tests + - name: Install Cypress working-directory: app/web run: | pnpm i pnpm install cypress - npx cypress run --spec "cypress/e2e/**" + + - uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 20 + command: cd app/web && npx cypress run --spec "cypress/e2e/**" - name: 'Upload Cypress Recordings to Github' uses: actions/upload-artifact@v4 @@ -58,10 +63,7 @@ jobs: path: app/web/cypress/videos/**/*.mp4 retention-days: 5 - # TODO(johnrwatson): Enable this when we're happy with the synthetic above - #- name: Send PagerDuty alert on failure - # if: ${{ failure() }} - # uses: Entle/action-pagerduty-alert@0.2.0 - # with: - # pagerduty-integration-key: '${{ secrets.PAGERDUTY_INTEGRATION_KEY }}' - # pagerduty-dedup-key: github_workflow_failed \ No newline at end of file + - name: Send Slack Failure Webhook + if: failure() + run: | + curl -X POST -H 'Content-type: application/json' --data "{\"text\": \":si: Failed Cypress E2E Test for Production: \"}" ${{ secrets.SLACK_WEBHOOK_URL }} From d3beea10998961e81ecc50d52063dac5d21e40c5 Mon Sep 17 00:00:00 2001 From: John Watson Date: Wed, 28 Feb 2024 18:53:43 +0000 Subject: [PATCH 05/21] fix(cypress): amend div selector for delete --- .../cypress/e2e/modelling-functionality/delete-component.cy.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/web/cypress/e2e/modelling-functionality/delete-component.cy.ts b/app/web/cypress/e2e/modelling-functionality/delete-component.cy.ts index bf8ae0348d..49bb1e6a3d 100644 --- a/app/web/cypress/e2e/modelling-functionality/delete-component.cy.ts +++ b/app/web/cypress/e2e/modelling-functionality/delete-component.cy.ts @@ -37,7 +37,7 @@ Cypress._.times(import.meta.env.VITE_SI_CYPRESS_MULTIPLIER ? 
import.meta.env.VIT .rightclick(); // Click the second dropdown menu item - cy.get('#dropdown-menu-item-2').click(); + cy.get('[class="truncate"]').contains("Delete Frame").parent().parent().click(); // Click the destroy button cy.get('button.vbutton.--variant-solid.--size-md.--tone-destructive') From 91ddc444ffa782bcecd04b086154f7cc5e30ecef Mon Sep 17 00:00:00 2001 From: Zachary Hamm Date: Tue, 27 Feb 2024 14:34:51 -0600 Subject: [PATCH 06/21] feat(dal,sdf): track whether a component is using the default variant Changes the component summary table to have a boolean flag indicating whether a component is using the default variant for a schema. (This lets us determine whether we should upgrade the component to the default). --- lib/dal/src/component/migrate.rs | 15 ++ lib/dal/src/diagram.rs | 2 + lib/dal/src/diagram/summary_diagram.rs | 24 ++- ...ault_variant_bool_to_component_summary.sql | 169 ++++++++++++++++++ lib/dal/src/pkg.rs | 3 + lib/dal/src/pkg/export.rs | 13 +- lib/dal/src/pkg/import.rs | 46 ++++- .../src/server/service/variant_definition.rs | 23 ++- .../variant_definition/exec_variant_def.rs | 11 ++ 9 files changed, 284 insertions(+), 22 deletions(-) create mode 100644 lib/dal/src/migrations/U2611__add_use_default_variant_bool_to_component_summary.sql diff --git a/lib/dal/src/component/migrate.rs b/lib/dal/src/component/migrate.rs index c647ca2f0b..25821b4fd9 100644 --- a/lib/dal/src/component/migrate.rs +++ b/lib/dal/src/component/migrate.rs @@ -3,6 +3,7 @@ use telemetry::prelude::*; use thiserror::Error; use crate::{ + diagram::{self, summary_diagram::SummaryDiagramError}, property_editor::values_summary::{ PropertyEditorValuesSummary, PropertyEditorValuesSummaryError, }, @@ -45,6 +46,8 @@ pub enum ComponentMigrateError { Socket(#[from] SocketError), #[error("standard model error: {0}")] StandardModel(#[from] StandardModelError), + #[error("summary diagram error: {0}")] + SummaryDiagram(#[from] SummaryDiagramError), } pub type ComponentMigrateResult = 
Result; @@ -103,9 +106,21 @@ pub async fn migrate_component_to_schema_variant( restore_prototypes_and_implicit_values(ctx, component_id).await?; } + // Update all the cached summaries for the new component, this part is pretty expensive AttributeValue::remove_dependency_summaries_for_deleted_values(ctx).await?; AttributeValue::update_component_dependencies(ctx, component_id).await?; PropertyEditorValuesSummary::create_or_update_component_entry(ctx, component_id).await?; + diagram::summary_diagram::update_socket_summary(ctx, &new_component).await?; + diagram::summary_diagram::component_update( + ctx, + &component_id, + new_component.name(ctx).await?, + new_component.color(ctx).await?.unwrap_or_default(), + new_component.get_type(ctx).await?, + new_component.resource(ctx).await?.payload.is_some(), + None, + ) + .await?; // Restore edges if matching sockets exist in the migrated component This // should probably use the connection annotation for matching, instead of diff --git a/lib/dal/src/diagram.rs b/lib/dal/src/diagram.rs index fad5526a41..2db9a37e9a 100644 --- a/lib/dal/src/diagram.rs +++ b/lib/dal/src/diagram.rs @@ -21,6 +21,8 @@ use crate::{ pub mod connection; pub(crate) mod summary_diagram; +pub use summary_diagram::falsify_using_default_variant_for_components_of_schema; +pub use summary_diagram::{SummaryDiagramError, SummaryDiagramResult}; #[remain::sorted] #[derive(Error, Debug)] diff --git a/lib/dal/src/diagram/summary_diagram.rs b/lib/dal/src/diagram/summary_diagram.rs index 19abd483c5..31847926cc 100644 --- a/lib/dal/src/diagram/summary_diagram.rs +++ b/lib/dal/src/diagram/summary_diagram.rs @@ -94,6 +94,7 @@ pub struct SummaryDiagramComponent { created_info: serde_json::Value, updated_info: serde_json::Value, deleted_info: serde_json::Value, + using_default_variant: bool, } impl_standard_model! 
{ @@ -207,12 +208,14 @@ pub async fn create_component_entry( } } + let using_default_variant = schema.default_schema_variant_id() == Some(schema_variant.id()); + let _row = ctx .txns() .await? .pg() .query_one( - "SELECT object FROM summary_diagram_component_create_v1($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20)", + "SELECT object FROM summary_diagram_component_create_v2($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21)", &[ ctx.tenancy(), ctx.visibility(), @@ -234,12 +237,29 @@ pub async fn create_component_entry( &serde_json::to_value(created_info)?, &serde_json::to_value(updated_info)?, &serde_json::to_value(deleted_info)?, + &using_default_variant, ], ) .await?; Ok(()) } +pub async fn falsify_using_default_variant_for_components_of_schema( + ctx: &DalContext, + schema_id: SchemaId, +) -> SummaryDiagramResult<()> { + ctx.txns() + .await? + .pg() + .execute( + "SELECT falsify_using_default_variant_for_components_of_schema_v1($1, $2, $3)", + &[ctx.tenancy(), ctx.visibility(), &schema_id], + ) + .await?; + + Ok(()) +} + pub async fn component_update_geometry( ctx: &DalContext, node_id: &NodeId, @@ -325,7 +345,7 @@ pub async fn component_update( .await? 
.pg() .query_one( - "SELECT object FROM summary_diagram_component_update_v2($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)", + "SELECT object FROM summary_diagram_component_update_v3($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)", &[ ctx.tenancy(), ctx.visibility(), diff --git a/lib/dal/src/migrations/U2611__add_use_default_variant_bool_to_component_summary.sql b/lib/dal/src/migrations/U2611__add_use_default_variant_bool_to_component_summary.sql new file mode 100644 index 0000000000..cda2bd6041 --- /dev/null +++ b/lib/dal/src/migrations/U2611__add_use_default_variant_bool_to_component_summary.sql @@ -0,0 +1,169 @@ +ALTER TABLE summary_diagram_components ADD COLUMN using_default_variant BOOLEAN NOT NULL DEFAULT TRUE; + +CREATE OR REPLACE FUNCTION summary_diagram_component_create_v2( + this_tenancy jsonb, + this_visibility jsonb, + this_id ident, + this_schema_name text, + this_schema_id ident, + this_schema_variant_id ident, + this_schema_variant_name text, + this_schema_category text, + this_sockets jsonb, + this_node_id ident, + this_display_name text, + this_position jsonb, + this_size jsonb, + this_color text, + this_node_type text, + this_change_status text, + this_has_resource boolean, + this_created_info jsonb, + this_updated_info jsonb, + this_deleted_info jsonb, + this_using_default_variant boolean, -- this field is the change from v1 of this function + OUT object json) AS +$$ +DECLARE + this_tenancy_record tenancy_record_v1; + this_visibility_record visibility_record_v1; + this_new_row summary_diagram_components%ROWTYPE; +BEGIN + this_tenancy_record := tenancy_json_to_columns_v1(this_tenancy); + this_visibility_record := visibility_json_to_columns_v1(this_visibility); + + INSERT INTO summary_diagram_components (id, tenancy_workspace_pk, visibility_change_set_pk, visibility_deleted_at, + component_id, display_name, node_id, schema_name, + schema_id, schema_variant_id, schema_variant_name, schema_category, + position, size, color, node_type, change_status, 
has_resource, created_info, + updated_info, deleted_info, sockets, using_default_variant, child_node_ids) + VALUES (this_id, this_tenancy_record.tenancy_workspace_pk, this_visibility_record.visibility_change_set_pk, + this_visibility_record.visibility_deleted_at, + this_id, this_display_name, this_node_id, this_schema_name, this_schema_id, this_schema_variant_id, + this_schema_variant_name, + this_schema_category, this_position, this_size, this_color, this_node_type, this_change_status, + this_has_resource, this_created_info, this_updated_info, this_deleted_info, + this_sockets, this_using_default_variant, jsonb_build_array()) + RETURNING * INTO this_new_row; +END +$$ LANGUAGE PLPGSQL VOLATILE; + +CREATE OR REPLACE FUNCTION summary_diagram_component_update_v3( + this_tenancy jsonb, + this_visibility jsonb, + this_component_id ident, + this_name text, + this_color text, + this_component_type text, + this_has_resource bool, + this_updated_info jsonb, + this_deleted_at timestamp with time zone, + this_deleted_info jsonb, + OUT object json) AS +$$ +DECLARE + this_tenancy_record tenancy_record_v1; + this_visibility_record visibility_record_v1; + this_new_row summary_diagram_components%ROWTYPE; + this_change_status text; + this_schema_id ident; + this_schema_name text; + this_schema_variant_id ident; + this_schema_variant_name text; + this_default_variant_id ident; +BEGIN + this_tenancy_record := tenancy_json_to_columns_v1(this_tenancy); + this_visibility_record := visibility_json_to_columns_v1(this_visibility); + + CALL force_component_summary_to_changeset_v2( + this_tenancy_record, + this_visibility_record, + this_component_id + ); + + + IF this_deleted_at IS NOT NULL THEN + this_change_status := 'deleted'; + ELSIF NOT component_summary_exists_in_head_v1( + this_tenancy_record, + this_component_id + ) + THEN + this_change_status := 'added'; + ELSE + this_change_status := 'modified'; + END IF; + + --- This is the new addition to the update, calculating the 
schema/variant info for the component + --- Normally one would try to express this as a series of inner joins, but that seems to explode + --- the time this query takes to execute. So we're doing it as a series of selects. + SELECT cbts.belongs_to_id + INTO STRICT this_schema_id + FROM component_belongs_to_schema_v1(this_tenancy, this_visibility) AS cbts + WHERE cbts.object_id = this_component_id LIMIT 1; + + SELECT schemas.name, schemas.default_schema_variant_id + INTO STRICT this_schema_name, this_default_variant_id + FROM schemas_v1(this_tenancy, this_visibility) AS schemas + WHERE schemas.id = this_schema_id; + + SELECT cbtsv.belongs_to_id + INTO STRICT this_schema_variant_id + FROM component_belongs_to_schema_variant_v1(this_tenancy, this_visibility) AS cbtsv + WHERE cbtsv.object_id = this_component_id LIMIT 1; + + SELECT schema_variants.name + INTO STRICT this_schema_variant_name + FROM schema_variants_v1(this_tenancy, this_visibility) AS schema_variants + WHERE schema_variants.id = this_schema_variant_id; + + UPDATE summary_diagram_components + SET display_name=this_name, + color=this_color, + node_type=this_component_type, + has_resource=this_has_resource, + updated_info=this_updated_info, + visibility_deleted_at = this_deleted_at, + deleted_info=this_deleted_info, + change_status=this_change_status, + schema_id=this_schema_id, + schema_name=this_schema_name, + schema_variant_id=this_schema_variant_id, + schema_variant_name=this_schema_variant_name, + using_default_variant=COALESCE(this_default_variant_id = this_schema_variant_id, false) + WHERE component_id = this_component_id + AND tenancy_workspace_pk = this_tenancy_record.tenancy_workspace_pk + AND visibility_change_set_pk = this_visibility_record.visibility_change_set_pk + RETURNING * INTO this_new_row; +END +$$ LANGUAGE PLPGSQL VOLATILE; + +-- When a new default schema variant is added, we need to turn "using_default_variant" to false +-- for all components that were using the default variant before 
the change. We don't have to +-- check anything, so long as this is only run after a new default variant is added, we're good. +CREATE OR REPLACE FUNCTION falsify_using_default_variant_for_components_of_schema_v1( + this_tenancy jsonb, + this_visibility jsonb, + this_schema_id ident +) +RETURNS VOID +AS +$$ +DECLARE + this_summary_row_id ident; +BEGIN + FOR this_summary_row_id IN + SELECT summary.id FROM summary_diagram_components_v1(this_tenancy, this_visibility) AS summary + WHERE summary.schema_id = this_schema_id AND summary.using_default_variant IS true + LOOP + PERFORM update_by_id_v1( + 'summary_diagram_components', + 'using_default_variant', + this_tenancy, + this_visibility, + this_summary_row_id, + false + ); + END LOOP; +END +$$ LANGUAGE PLPGSQL; \ No newline at end of file diff --git a/lib/dal/src/pkg.rs b/lib/dal/src/pkg.rs index 6b05cb6256..89e1f567c4 100644 --- a/lib/dal/src/pkg.rs +++ b/lib/dal/src/pkg.rs @@ -14,6 +14,7 @@ use si_pkg::{FuncSpecBackendKind, FuncSpecBackendResponseType, SiPkgError, SpecE use crate::authentication_prototype::AuthenticationPrototypeError; use crate::component::migrate::ComponentMigrateError; +use crate::diagram::summary_diagram::SummaryDiagramError; use crate::property_editor::values_summary::PropertyEditorValuesSummaryError; use crate::{ component::view::debug::ComponentDebugViewError, @@ -225,6 +226,8 @@ pub enum PkgError { StandardModelMissingBelongsTo(&'static str, &'static str, String), #[error("standard model relationship {0} found multiple belongs_to for {1} with id {2}")] StandardModelMultipleBelongsTo(&'static str, &'static str, String), + #[error("summary diagram error: {0}")] + SummaryDiagram(#[from] SummaryDiagramError), #[error(transparent)] UlidDecode(#[from] ulid::DecodeError), #[error("unable to export component: {0}")] diff --git a/lib/dal/src/pkg/export.rs b/lib/dal/src/pkg/export.rs index d329749383..f1e334fc60 100644 --- a/lib/dal/src/pkg/export.rs +++ b/lib/dal/src/pkg/export.rs @@ -1314,18 +1314,19 
@@ impl PkgExporter { .await? .ok_or(ComponentError::NoSchemaVariant(*component.id()))?; - let component_variant = match self - .variant_map - .get(change_set_pk.unwrap_or(ChangeSetPk::NONE), variant.id()) - { - Some(variant_spec) => ComponentSpecVariant::WorkspaceVariant { + let component_variant = match ( + self.variant_map + .get(change_set_pk.unwrap_or(ChangeSetPk::NONE), variant.id()), + variant.is_builtin(ctx).await?, + ) { + (Some(variant_spec), false) => ComponentSpecVariant::WorkspaceVariant { variant_unique_id: variant_spec .unique_id .as_ref() .unwrap_or(&variant.id().to_string()) .to_owned(), }, - None => { + _ => { let schema = component .schema(ctx) .await? diff --git a/lib/dal/src/pkg/import.rs b/lib/dal/src/pkg/import.rs index 44f6a7c659..fd283cc311 100644 --- a/lib/dal/src/pkg/import.rs +++ b/lib/dal/src/pkg/import.rs @@ -15,7 +15,7 @@ use telemetry::prelude::*; use crate::{ authentication_prototype::{AuthenticationPrototype, AuthenticationPrototypeContext}, - component::{self}, + component, diagram, property_editor::values_summary::PropertyEditorValuesSummary, }; use crate::{ @@ -258,10 +258,35 @@ async fn import_change_set( } } - AttributeValue::remove_dependency_summaries_for_deleted_values(ctx).await?; - for component in Component::list(ctx).await? { - AttributeValue::update_component_dependencies(ctx, *component.id()).await?; - PropertyEditorValuesSummary::create_or_update_component_entry(ctx, *component.id()).await?; + if !components.is_empty() { + info!("calculating cached data for imported components."); + AttributeValue::remove_dependency_summaries_for_deleted_values(ctx).await?; + for component in Component::list(ctx).await? 
{ + info!("calculating cached data for component {:?}", component.id()); + AttributeValue::update_component_dependencies(ctx, *component.id()).await?; + PropertyEditorValuesSummary::create_or_update_component_entry(ctx, *component.id()) + .await?; + diagram::summary_diagram::update_socket_summary(ctx, &component).await?; + + // We want the serde representation of the deleted_at value since it's + // identical to what we send to the database + let deleted_at_value = match component.visibility().deleted_at { + Some(deleted_at) => Some(serde_json::to_value(deleted_at)?), + None => None, + } + .map(|v| v.to_string()); + + diagram::summary_diagram::component_update( + ctx, + component.id(), + component.name(ctx).await?, + component.color(ctx).await?.unwrap_or_default(), + component.get_type(ctx).await?, + component.resource(ctx).await?.payload.is_some(), + deleted_at_value, + ) + .await?; + } } info!("Finished Imports: {}", Utc::now()); @@ -407,6 +432,7 @@ async fn import_component( thing_map: &mut ThingMap, force_resource_patch: bool, ) -> PkgResult> { + info!("importing component {:?}", component_spec.name); let variant = match &component_spec.variant { ComponentSpecVariant::BuiltinVariant { schema_name, @@ -461,6 +487,11 @@ async fn import_component( { Some(Thing::Component((existing_component, node))) => { if Component::schema_variant_id(ctx, *existing_component.id()).await? 
!= *variant.id() { + info!( + "respining component {:?} onto variant {}", + existing_component.id(), + variant.name() + ); // If the component exists already, but the schema variant is // different, we need to respin the component to the change-set // specific schema variant @@ -515,6 +546,11 @@ async fn import_component( imported_json, ); + info!( + "component root implicit value: {:?}", + &component_root_implicit_value + ); + if component_root_implicit_value != serde_json::Value::Null { let root_attribute_value = component.root_attribute_value(ctx).await?; AttributeValue::update_for_context_without_propagating_dependent_values( diff --git a/lib/sdf-server/src/server/service/variant_definition.rs b/lib/sdf-server/src/server/service/variant_definition.rs index 4f8197aa3f..801727249a 100644 --- a/lib/sdf-server/src/server/service/variant_definition.rs +++ b/lib/sdf-server/src/server/service/variant_definition.rs @@ -9,12 +9,11 @@ use strum::IntoEnumIterator; use telemetry::prelude::*; use thiserror::Error; -use dal::authentication_prototype::{ - AuthenticationPrototype, AuthenticationPrototypeContext, AuthenticationPrototypeError, -}; -use dal::ws_event::AttributePrototypeContextKind; use dal::{ attribute::prototype::argument::{AttributePrototypeArgument, AttributePrototypeArgumentError}, + authentication_prototype::{ + AuthenticationPrototype, AuthenticationPrototypeContext, AuthenticationPrototypeError, + }, func::argument::{FuncArgumentError, FuncArgumentId}, installed_pkg::InstalledPkgError, pkg::PkgError, @@ -23,13 +22,15 @@ use dal::{ SchemaVariantDefinitionError as DalSchemaVariantDefinitionError, SchemaVariantDefinitionId, }, socket::SocketError, + ws_event::AttributePrototypeContextKind, ActionPrototype, ActionPrototypeContext, ActionPrototypeError, AttributeContext, AttributeContextBuilderError, AttributeContextError, AttributePrototype, - AttributePrototypeError, AttributePrototypeId, AttributeValueError, ChangeSetError, DalContext, - ExternalProvider, 
ExternalProviderError, Func, FuncBinding, FuncBindingError, FuncError, - FuncId, InternalProvider, InternalProviderError, LeafInputLocation, LeafKind, Prop, PropError, - PropKind, SchemaError, SchemaVariant, SchemaVariantError, SchemaVariantId, SocketId, - StandardModel, StandardModelError, TenancyError, TransactionsError, UserError, WsEventError, + AttributePrototypeError, AttributePrototypeId, AttributeValueError, ChangeSetError, + ComponentError as DalComponentError, DalContext, ExternalProvider, ExternalProviderError, Func, + FuncBinding, FuncBindingError, FuncError, FuncId, InternalProvider, InternalProviderError, + LeafInputLocation, LeafKind, Prop, PropError, PropKind, SchemaError, SchemaVariant, + SchemaVariantError, SchemaVariantId, SocketId, StandardModel, StandardModelError, TenancyError, + TransactionsError, UserError, WsEventError, }; use si_pkg::{SiPkgError, SpecError}; @@ -70,6 +71,8 @@ pub enum SchemaVariantDefinitionError { ContextTransaction(#[from] TransactionsError), #[error("error creating schema variant from definition: {0}")] CouldNotCreateSchemaVariantFromDefinition(String), + #[error("component error: {0}")] + DalComponent(#[from] DalComponentError), #[error(transparent)] ExternalProvider(#[from] ExternalProviderError), #[error("external provider not found for socket: {0}")] @@ -140,6 +143,8 @@ pub enum SchemaVariantDefinitionError { Spec(#[from] SpecError), #[error(transparent)] StandardModel(#[from] StandardModelError), + #[error("summary diagram error: {0}")] + SummaryDiagram(#[from] dal::diagram::SummaryDiagramError), #[error("tenancy error: {0}")] Tenancy(#[from] TenancyError), #[error("transparent")] diff --git a/lib/sdf-server/src/server/service/variant_definition/exec_variant_def.rs b/lib/sdf-server/src/server/service/variant_definition/exec_variant_def.rs index 9974193180..1f597002f6 100644 --- a/lib/sdf-server/src/server/service/variant_definition/exec_variant_def.rs +++ 
b/lib/sdf-server/src/server/service/variant_definition/exec_variant_def.rs @@ -7,6 +7,7 @@ use convert_case::{Case, Casing}; use dal::component::ComponentKind; use dal::pkg::import::{clone_and_import_funcs, import_schema_variant}; use dal::pkg::PkgExporter; +use dal::Component; use hyper::Uri; use serde::{Deserialize, Serialize}; use ulid::Ulid; @@ -547,6 +548,16 @@ pub async fn exec_variant_def_multi_variant_editing( .set_default_schema_variant_id(ctx, Some(schema_variant_id)) .await?; + // We just changed the default schema variant, so we need to mark all the + // component summaries as not using the default variant + dal::diagram::falsify_using_default_variant_for_components_of_schema(ctx, *schema.id()).await?; + for component in Component::list_for_schema(ctx, *schema.id()).await? { + WsEvent::component_updated(ctx, *component.id()) + .await? + .publish_on_commit(ctx) + .await?; + } + track( &posthog_client, ctx, From 6a3e7de9a8f7de20eb28d063b27e8829dacd4c85 Mon Sep 17 00:00:00 2001 From: Paulo Cabral Date: Wed, 28 Feb 2024 17:23:49 -0300 Subject: [PATCH 07/21] Deduplicate dependent values update Co-Authored-By: Jacob Helwig --- lib/dal/src/attribute/value.rs | 28 +++------ lib/dal/src/component.rs | 23 ++----- lib/dal/src/context.rs | 33 +++++++++- lib/dal/src/edge.rs | 17 ++---- lib/dal/src/history_event.rs | 2 +- .../job/definition/dependent_values_update.rs | 8 +-- lib/dal/src/job/processor/nats_processor.rs | 6 +- lib/dal/src/job/queue.rs | 60 ++++++++++++++++--- lib/dal/src/tenancy.rs | 2 +- .../internal/component/view/complex_func.rs | 11 +--- .../internal/provider/intra_component.rs | 11 +--- .../component/delete_property_editor_value.rs | 12 ++-- .../diagram/connect_component_to_frame.rs | 25 ++------ .../service/diagram/create_connection.rs | 13 ++-- .../src/server/service/func/save_and_exec.rs | 13 ++-- 15 files changed, 130 insertions(+), 134 deletions(-) diff --git a/lib/dal/src/attribute/value.rs b/lib/dal/src/attribute/value.rs index 
afda23f203..41c07b4b1b 100644 --- a/lib/dal/src/attribute/value.rs +++ b/lib/dal/src/attribute/value.rs @@ -59,9 +59,7 @@ use crate::{ FuncBindingReturnValue, FuncBindingReturnValueError, FuncBindingReturnValueId, }, }, - impl_standard_model, - job::definition::DependentValuesUpdate, - pk, + impl_standard_model, pk, standard_model::{self, TypeHint}, standard_model_accessor, standard_model_belongs_to, standard_model_has_many, AttributeContextError, AttributePrototypeArgumentError, Component, ComponentId, DalContext, @@ -746,12 +744,8 @@ impl AttributeValue { AttributeValueError::NotFound(attribute_value_id, *ctx.visibility()) })?; av.update_from_prototype_function(ctx).await?; - ctx.enqueue_job(DependentValuesUpdate::new( - ctx.access_builder(), - *ctx.visibility(), - vec![attribute_value_id], - )) - .await?; + ctx.enqueue_dependent_values_update(vec![attribute_value_id]) + .await?; } Ok(()) @@ -886,12 +880,8 @@ impl AttributeValue { // already updated the initial attribute value, so is there much value? if propagate_dependent_values && !ctx.no_dependent_values() { - ctx.enqueue_job(DependentValuesUpdate::new( - ctx.access_builder(), - *ctx.visibility(), - vec![new_attribute_value_id], - )) - .await?; + ctx.enqueue_dependent_values_update(vec![new_attribute_value_id]) + .await?; } if let Some(av) = AttributeValue::get_by_id(ctx, &new_attribute_value_id).await? { @@ -993,12 +983,8 @@ impl AttributeValue { } if !ctx.no_dependent_values() { - ctx.enqueue_job(DependentValuesUpdate::new( - ctx.access_builder(), - *ctx.visibility(), - vec![new_attribute_value_id], - )) - .await?; + ctx.enqueue_dependent_values_update(vec![new_attribute_value_id]) + .await?; } if let Some(av) = AttributeValue::get_by_id(ctx, &new_attribute_value_id).await? 
{ diff --git a/lib/dal/src/component.rs b/lib/dal/src/component.rs index 07eab765b4..7d2a6ec19c 100644 --- a/lib/dal/src/component.rs +++ b/lib/dal/src/component.rs @@ -21,7 +21,6 @@ use crate::diagram::summary_diagram::update_socket_summary; use crate::edge::EdgeKind; use crate::func::binding::FuncBindingError; use crate::func::binding_return_value::{FuncBindingReturnValueError, FuncBindingReturnValueId}; -use crate::job::definition::DependentValuesUpdate; use crate::schema::variant::root_prop::SiPropChild; use crate::schema::variant::{SchemaVariantError, SchemaVariantId}; use crate::schema::SchemaVariant; @@ -334,12 +333,8 @@ impl Component { // We need to make sure that *ALL* functions are run, not just those that directly // depend on the name being set. let component_av_ids = AttributeValue::ids_for_component(ctx, component.id).await?; - ctx.enqueue_job(DependentValuesUpdate::new( - ctx.access_builder(), - *ctx.visibility(), - component_av_ids, - )) - .await?; + ctx.enqueue_dependent_values_update(component_av_ids) + .await?; diagram::summary_diagram::create_component_entry( ctx, @@ -1000,12 +995,7 @@ impl Component { let ids = attr_values.iter().map(|av| *av.id()).collect(); - ctx.enqueue_job(DependentValuesUpdate::new( - ctx.access_builder(), - *ctx.visibility(), - ids, - )) - .await?; + ctx.enqueue_dependent_values_update(ids).await?; Ok(()) } @@ -1077,12 +1067,7 @@ impl Component { let ids = attr_values.iter().map(|av| *av.id()).collect(); - ctx.enqueue_job(DependentValuesUpdate::new( - ctx.access_builder(), - *ctx.visibility(), - ids, - )) - .await?; + ctx.enqueue_dependent_values_update(ids).await?; diagram::summary_diagram::component_update( ctx, diff --git a/lib/dal/src/context.rs b/lib/dal/src/context.rs index 805781ca6f..e21a1d72e7 100644 --- a/lib/dal/src/context.rs +++ b/lib/dal/src/context.rs @@ -12,11 +12,12 @@ use veritech_client::{Client as VeritechClient, CycloneEncryptionKey}; use crate::{ job::{ + definition::{FixesJob, RefreshJob}, 
processor::{JobQueueProcessor, JobQueueProcessorError}, producer::{BlockingJobError, BlockingJobResult, JobProducer}, queue::JobQueue, }, - HistoryActor, StandardModel, Tenancy, TenancyError, Visibility, + AttributeValueId, HistoryActor, StandardModel, Tenancy, TenancyError, Visibility, }; /// A context type which contains handles to common core service dependencies. @@ -406,12 +407,38 @@ impl DalContext { pub async fn enqueue_job( &self, - job: Box, + job: Box, ) -> Result<(), TransactionsError> { self.txns().await?.job_queue.enqueue_job(job).await; Ok(()) } + pub async fn enqueue_fix(&self, job: Box) -> Result<(), TransactionsError> { + self.txns().await?.job_queue.enqueue_job(job).await; + Ok(()) + } + + pub async fn enqueue_refresh(&self, job: Box) -> Result<(), TransactionsError> { + self.txns().await?.job_queue.enqueue_job(job).await; + Ok(()) + } + + pub async fn enqueue_dependent_values_update( + &self, + ids: Vec, + ) -> Result<(), TransactionsError> { + self.txns() + .await? + .job_queue + .enqueue_dependent_values_update( + self.visibility().change_set_pk, + self.access_builder(), + ids, + ) + .await; + Ok(()) + } + /// Similar to `enqueue_job`, except that instead of waiting to flush the job to /// the processing system on `commit`, the job is immediately flushed, and the /// processor is expected to not return until the job has finished. Returns the @@ -548,7 +575,7 @@ impl Default for RequestContext { } /// A request context builder which requires a [`Visibility`] to be completed. -#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)] +#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] pub struct AccessBuilder { /// A suitable tenancy for the consuming DAL objects. 
tenancy: Tenancy, diff --git a/lib/dal/src/edge.rs b/lib/dal/src/edge.rs index 1dc177924e..3d3d5ad8c6 100644 --- a/lib/dal/src/edge.rs +++ b/lib/dal/src/edge.rs @@ -9,7 +9,6 @@ use telemetry::prelude::*; use thiserror::Error; use crate::func::argument::FuncArgumentError; -use crate::job::definition::DependentValuesUpdate; use crate::node::NodeId; use crate::socket::SocketError; use crate::standard_model::objects_from_rows; @@ -559,12 +558,8 @@ impl Edge { attr_value.update_from_prototype_function(ctx).await?; - ctx.enqueue_job(DependentValuesUpdate::new( - ctx.access_builder(), - *ctx.visibility(), - vec![*attr_value.id()], - )) - .await?; + ctx.enqueue_dependent_values_update(vec![*attr_value.id()]) + .await?; diagram::summary_diagram::delete_edge_entry(ctx, self) .await @@ -721,12 +716,8 @@ impl Edge { ) .await?; - ctx.enqueue_job(DependentValuesUpdate::new( - ctx.access_builder(), - *ctx.visibility(), - vec![*attr_value.id()], - )) - .await?; + ctx.enqueue_dependent_values_update(vec![*attr_value.id()]) + .await?; Ok(Edge::get_by_id(ctx, &edge_id).await?) 
} diff --git a/lib/dal/src/history_event.rs b/lib/dal/src/history_event.rs index f579a35fc5..98da1eb001 100644 --- a/lib/dal/src/history_event.rs +++ b/lib/dal/src/history_event.rs @@ -28,7 +28,7 @@ pub enum HistoryEventError { pub type HistoryEventResult = Result; #[remain::sorted] -#[derive(Deserialize, Serialize, Debug, PartialEq, Eq, StrumDisplay, Clone, Copy)] +#[derive(Deserialize, Serialize, Debug, PartialEq, Eq, StrumDisplay, Clone, Copy, Hash)] pub enum HistoryActor { SystemInit, User(UserPk), diff --git a/lib/dal/src/job/definition/dependent_values_update.rs b/lib/dal/src/job/definition/dependent_values_update.rs index f9ce144ead..532f5a1871 100644 --- a/lib/dal/src/job/definition/dependent_values_update.rs +++ b/lib/dal/src/job/definition/dependent_values_update.rs @@ -212,12 +212,8 @@ impl DependentValuesUpdate { .values_completed(ctx, self.attribute_values.clone()) .await; - ctx.enqueue_job(DependentValuesUpdate::new( - self.access_builder, - self.visibility, - self.attribute_values.clone(), - )) - .await?; + ctx.enqueue_dependent_values_update(self.attribute_values.clone()) + .await?; } // No matter what, we need to finish the updater diff --git a/lib/dal/src/job/processor/nats_processor.rs b/lib/dal/src/job/processor/nats_processor.rs index 44f84341c6..d6921287b3 100644 --- a/lib/dal/src/job/processor/nats_processor.rs +++ b/lib/dal/src/job/processor/nats_processor.rs @@ -178,7 +178,11 @@ impl JobQueueProcessor for NatsProcessor { let span = Span::current(); span.record("queue.size", queue.size().await); - self.block_on_jobs(queue.drain().await) + let mut jobs = Vec::with_capacity(queue.size().await); + while let Some(element) = queue.fetch_job().await { + jobs.push(element); + } + self.block_on_jobs(jobs) .instrument(info_span!("nats_processor.block_on_jobs")) .await?; diff --git a/lib/dal/src/job/queue.rs b/lib/dal/src/job/queue.rs index 836a97929f..d6bede0c0a 100644 --- a/lib/dal/src/job/queue.rs +++ b/lib/dal/src/job/queue.rs @@ -1,19 +1,37 @@ 
use super::producer::JobProducer; -use std::{collections::VecDeque, sync::Arc}; +use crate::{AccessBuilder, AttributeValueId, ChangeSetPk, DependentValuesUpdate, Visibility}; +use std::{collections::HashMap, collections::HashSet, collections::VecDeque, sync::Arc}; use tokio::sync::Mutex; +type DependentValuesUpdates = + Arc>>>; #[derive(Debug, Clone, Default)] pub struct JobQueue { queue: Arc>>>, + dependent_values_update_ids: DependentValuesUpdates, } impl JobQueue { pub fn new() -> Self { Self { queue: Default::default(), + dependent_values_update_ids: Default::default(), } } + pub async fn enqueue_dependent_values_update( + &self, + change_set_pk: ChangeSetPk, + access_builder: AccessBuilder, + ids: Vec, + ) { + let mut lock = self.dependent_values_update_ids.lock().await; + + lock.entry((change_set_pk, access_builder)) + .or_default() + .extend(ids); + } + pub async fn enqueue_job(&self, job: Box) { let mut lock = self.queue.lock().await; @@ -21,22 +39,48 @@ impl JobQueue { } pub async fn fetch_job(&self) -> Option> { - self.queue.lock().await.pop_front() + match self.queue.lock().await.pop_front() { + Some(job) => Some(job), + None => self + .fetch_dependent_values_update() + .await + .map(|job| job as Box), + } } - pub async fn empty(&self) -> VecDeque> { - std::mem::take(&mut *self.queue.lock().await) + pub async fn fetch_dependent_values_update(&self) -> Option> { + let key = self + .dependent_values_update_ids + .lock() + .await + .keys() + .next() + .copied(); + if let Some((change_set_pk, access_builder)) = key { + let maybe_ids: Option> = self + .dependent_values_update_ids + .lock() + .await + .remove(&(change_set_pk, access_builder)); + maybe_ids.map(|ids| { + DependentValuesUpdate::new( + access_builder, + Visibility::new(change_set_pk, None), + ids.into_iter().collect(), + ) + }) + } else { + None + } } pub async fn is_empty(&self) -> bool { self.queue.lock().await.is_empty() + && self.dependent_values_update_ids.lock().await.is_empty() } pub async 
fn size(&self) -> usize { self.queue.lock().await.len() - } - - pub async fn drain(&self) -> Vec> { - self.queue.lock().await.drain(0..).collect() + + (!self.dependent_values_update_ids.lock().await.is_empty() as usize) } } diff --git a/lib/dal/src/tenancy.rs b/lib/dal/src/tenancy.rs index 30f1db4410..95d9d597fb 100644 --- a/lib/dal/src/tenancy.rs +++ b/lib/dal/src/tenancy.rs @@ -14,7 +14,7 @@ pub enum TenancyError { pub type TenancyResult = Result; -#[derive(Deserialize, Serialize, Debug, PartialEq, Eq, Clone, Copy)] +#[derive(Deserialize, Serialize, Debug, PartialEq, Eq, Clone, Copy, Hash)] pub struct Tenancy { #[serde(rename = "tenancy_workspace_pk")] workspace_pk: Option, diff --git a/lib/dal/tests/integration_test/internal/component/view/complex_func.rs b/lib/dal/tests/integration_test/internal/component/view/complex_func.rs index 2c7256fcbf..dee34cbf09 100644 --- a/lib/dal/tests/integration_test/internal/component/view/complex_func.rs +++ b/lib/dal/tests/integration_test/internal/component/view/complex_func.rs @@ -1,7 +1,6 @@ use pretty_assertions_sorted::assert_eq; use dal::func::argument::{FuncArgument, FuncArgumentKind}; -use dal::job::definition::DependentValuesUpdate; use dal::{ AttributeContext, AttributePrototypeArgument, AttributeReadContext, AttributeValue, Component, ComponentView, DalContext, ExternalProvider, Func, FuncBackendKind, FuncBackendResponseType, @@ -198,13 +197,9 @@ async fn nested_object_prop_with_complex_func(ctx: &DalContext) { .await .expect("could not update from prototype function"); - ctx.enqueue_job(DependentValuesUpdate::new( - ctx.access_builder(), - *ctx.visibility(), - vec![*attribute_value_for_prototype.id()], - )) - .await - .expect("failed to enqueue job"); + ctx.enqueue_dependent_values_update(vec![*attribute_value_for_prototype.id()]) + .await + .expect("failed to enqueue job"); ctx.blocking_commit() .await diff --git a/lib/dal/tests/integration_test/internal/provider/intra_component.rs 
b/lib/dal/tests/integration_test/internal/provider/intra_component.rs index b7c4c8d23f..1732ad2755 100644 --- a/lib/dal/tests/integration_test/internal/provider/intra_component.rs +++ b/lib/dal/tests/integration_test/internal/provider/intra_component.rs @@ -1,7 +1,6 @@ use dal::{ attribute::context::AttributeContextBuilder, func::argument::{FuncArgument, FuncArgumentKind}, - job::definition::DependentValuesUpdate, provider::internal::InternalProvider, AttributeContext, AttributePrototypeArgument, AttributeReadContext, AttributeValue, Component, ComponentView, DalContext, ExternalProvider, Func, FuncBackendKind, FuncBackendResponseType, @@ -534,13 +533,9 @@ async fn intra_component_custom_func_update_to_external_provider(ctx: &DalContex .update_from_prototype_function(ctx) .await .expect("update from proto func"); - ctx.enqueue_job(DependentValuesUpdate::new( - ctx.access_builder(), - *ctx.visibility(), - vec![*freya_value.id()], - )) - .await - .expect("failed to enqueue job"); + ctx.enqueue_dependent_values_update(vec![*freya_value.id()]) + .await + .expect("unable to enqueue dependent values update"); ctx.blocking_commit() .await diff --git a/lib/sdf-server/src/server/service/component/delete_property_editor_value.rs b/lib/sdf-server/src/server/service/component/delete_property_editor_value.rs index 7930ba3378..90de306ebe 100644 --- a/lib/sdf-server/src/server/service/component/delete_property_editor_value.rs +++ b/lib/sdf-server/src/server/service/component/delete_property_editor_value.rs @@ -3,8 +3,8 @@ use crate::service::component::{ComponentError, ComponentResult}; use axum::response::IntoResponse; use axum::Json; use dal::{ - AttributeReadContext, AttributeValue, AttributeValueId, ChangeSet, ComponentId, - DependentValuesUpdate, Prop, PropId, PropKind, StandardModel, Visibility, WsEvent, + AttributeReadContext, AttributeValue, AttributeValueId, ChangeSet, ComponentId, Prop, PropId, + PropKind, StandardModel, Visibility, WsEvent, }; use serde::{Deserialize, 
Serialize}; @@ -92,12 +92,8 @@ pub async fn delete_property_editor_value( av.delete_by_id(&ctx).await?; } - ctx.enqueue_job(DependentValuesUpdate::new( - ctx.access_builder(), - *ctx.visibility(), - vec![*parent_av.id()], - )) - .await?; + ctx.enqueue_dependent_values_update(vec![*parent_av.id()]) + .await?; ctx.commit().await?; diff --git a/lib/sdf-server/src/server/service/diagram/connect_component_to_frame.rs b/lib/sdf-server/src/server/service/diagram/connect_component_to_frame.rs index 4fb2cb1a15..aeade44c23 100644 --- a/lib/sdf-server/src/server/service/diagram/connect_component_to_frame.rs +++ b/lib/sdf-server/src/server/service/diagram/connect_component_to_frame.rs @@ -6,7 +6,6 @@ use hyper::http::Uri; use serde::{Deserialize, Serialize}; use dal::edge::{EdgeKind, EdgeObjectId, VertexObjectKind}; -use dal::job::definition::DependentValuesUpdate; use dal::socket::{SocketEdgeKind, SocketKind}; use dal::{ node::NodeId, AttributeReadContext, AttributeValue, ChangeSet, Component, ComponentError, @@ -211,12 +210,8 @@ async fn connect_component_sockets_to_frame_inner_work( attribute_value_context, ))?; - ctx.enqueue_job(DependentValuesUpdate::new( - ctx.access_builder(), - *ctx.visibility(), - vec![*attribute_value.id()], - )) - .await?; + ctx.enqueue_dependent_values_update(vec![*attribute_value.id()]) + .await?; } SocketEdgeKind::ConfigurationOutput => { let provider = ExternalProvider::find_for_socket(ctx, *parent_socket.id()) @@ -260,12 +255,8 @@ async fn connect_component_sockets_to_frame_inner_work( attribute_value_context, ))?; - ctx.enqueue_job(DependentValuesUpdate::new( - ctx.access_builder(), - *ctx.visibility(), - vec![*attribute_value.id()], - )) - .await?; + ctx.enqueue_dependent_values_update(vec![*attribute_value.id()]) + .await?; } } } @@ -358,12 +349,8 @@ async fn connect_component_sockets_to_frame_inner_work( .update_from_prototype_function(ctx) .await?; - ctx.enqueue_job(DependentValuesUpdate::new( - ctx.access_builder(), - *ctx.visibility(), - 
vec![*dest_attribute_value.id()], - )) - .await?; + ctx.enqueue_dependent_values_update(vec![*dest_attribute_value.id()]) + .await?; } } } diff --git a/lib/sdf-server/src/server/service/diagram/create_connection.rs b/lib/sdf-server/src/server/service/diagram/create_connection.rs index 2503aa5025..0ec5a8c501 100644 --- a/lib/sdf-server/src/server/service/diagram/create_connection.rs +++ b/lib/sdf-server/src/server/service/diagram/create_connection.rs @@ -2,9 +2,8 @@ use axum::extract::OriginalUri; use axum::{response::IntoResponse, Json}; use dal::edge::EdgeKind; use dal::{ - job::definition::DependentValuesUpdate, node::NodeId, socket::SocketId, AttributeReadContext, - AttributeValue, ChangeSet, Connection, InternalProvider, Node, Socket, StandardModel, - Visibility, WsEvent, + node::NodeId, socket::SocketId, AttributeReadContext, AttributeValue, ChangeSet, Connection, + InternalProvider, Node, Socket, StandardModel, Visibility, WsEvent, }; use serde::{Deserialize, Serialize}; @@ -121,12 +120,8 @@ pub async fn create_connection( .update_from_prototype_function(&ctx) .await?; - ctx.enqueue_job(DependentValuesUpdate::new( - ctx.access_builder(), - *ctx.visibility(), - vec![*to_attribute_value.id()], - )) - .await?; + ctx.enqueue_dependent_values_update(vec![*to_attribute_value.id()]) + .await?; track( &posthog_client, diff --git a/lib/sdf-server/src/server/service/func/save_and_exec.rs b/lib/sdf-server/src/server/service/func/save_and_exec.rs index cf6da62032..f314652db8 100644 --- a/lib/sdf-server/src/server/service/func/save_and_exec.rs +++ b/lib/sdf-server/src/server/service/func/save_and_exec.rs @@ -5,9 +5,9 @@ use super::{ use crate::server::extract::{AccessBuilder, HandlerContext}; use axum::{response::IntoResponse, Json}; use dal::{ - job::definition::DependentValuesUpdate, ActionPrototype, AttributePrototype, AttributeValue, - AttributeValueError, AttributeValueId, ChangeSet, Component, DalContext, Func, FuncBackendKind, - FuncBackendResponseType, 
RootPropChild, SchemaVariant, StandardModel, WsEvent, + ActionPrototype, AttributePrototype, AttributeValue, AttributeValueError, AttributeValueId, + ChangeSet, Component, DalContext, Func, FuncBackendKind, FuncBackendResponseType, + RootPropChild, SchemaVariant, StandardModel, WsEvent, }; async fn update_values_for_func(ctx: &DalContext, func: &Func) -> FuncResult<()> { @@ -101,12 +101,7 @@ async fn update_values_for_func(ctx: &DalContext, func: &Func) -> FuncResult<()> } } - ctx.enqueue_job(DependentValuesUpdate::new( - ctx.access_builder(), - *ctx.visibility(), - value_ids, - )) - .await?; + ctx.enqueue_dependent_values_update(value_ids).await?; } Ok(()) From cf97df9a94c47369c79f7b73aeae6764c7a36e2f Mon Sep 17 00:00:00 2001 From: Paulo Cabral Date: Wed, 28 Feb 2024 19:12:14 -0300 Subject: [PATCH 08/21] Deduplicate update component Co-Authored-By: Jacob Helwig --- lib/dal/src/attribute/value.rs | 20 ++---------- lib/dal/src/context.rs | 57 ++++++++++++++++++++++++++++++++-- 2 files changed, 57 insertions(+), 20 deletions(-) diff --git a/lib/dal/src/attribute/value.rs b/lib/dal/src/attribute/value.rs index 41c07b4b1b..a5cba9a75d 100644 --- a/lib/dal/src/attribute/value.rs +++ b/lib/dal/src/attribute/value.rs @@ -866,13 +866,7 @@ impl AttributeValue { let new_attribute_value_id: AttributeValueId = row.try_get("new_attribute_value_id")?; if !context.is_component_unset() { - ctx.txns() - .await? - .pg() - .execute( - "SELECT attribute_value_dependencies_update_component_v1($1, $2, $3)", - &[ctx.tenancy(), ctx.visibility(), &context.component_id()], - ) + ctx.enqueue_dependencies_update_component(context.component_id()) .await?; } @@ -968,17 +962,7 @@ impl AttributeValue { let new_attribute_value_id: AttributeValueId = row.try_get("new_attribute_value_id")?; if !item_attribute_context.is_component_unset() { - ctx.txns() - .await? 
- .pg() - .execute( - "SELECT attribute_value_dependencies_update_component_v1($1, $2, $3)", - &[ - ctx.tenancy(), - ctx.visibility(), - &item_attribute_context.component_id(), - ], - ) + ctx.enqueue_dependencies_update_component(item_attribute_context.component_id()) .await?; } diff --git a/lib/dal/src/context.rs b/lib/dal/src/context.rs index e21a1d72e7..d7af618bf2 100644 --- a/lib/dal/src/context.rs +++ b/lib/dal/src/context.rs @@ -1,4 +1,4 @@ -use std::{mem, path::PathBuf, sync::Arc}; +use std::{collections::HashMap, collections::HashSet, mem, path::PathBuf, sync::Arc}; use futures::Future; use serde::{Deserialize, Serialize}; @@ -17,7 +17,8 @@ use crate::{ producer::{BlockingJobError, BlockingJobResult, JobProducer}, queue::JobQueue, }, - AttributeValueId, HistoryActor, StandardModel, Tenancy, TenancyError, Visibility, + AttributeValueId, ChangeSetPk, ComponentId, HistoryActor, StandardModel, Tenancy, TenancyError, + Visibility, }; /// A context type which contains handles to common core service dependencies. @@ -405,6 +406,21 @@ impl DalContext { new } + pub async fn enqueue_dependencies_update_component( + &self, + component_id: ComponentId, + ) -> Result<(), TransactionsError> { + self.txns() + .await? 
+ .enqueue_dependencies_update_component( + *self.tenancy(), + self.visibility().change_set_pk, + component_id, + ) + .await; + Ok(()) + } + pub async fn enqueue_job( &self, job: Box, @@ -789,6 +805,9 @@ pub struct Transactions { nats_txn: NatsTxn, job_processor: Box, job_queue: JobQueue, + #[allow(clippy::type_complexity)] + dependencies_update_component: + Arc>>>, } impl Transactions { @@ -803,6 +822,7 @@ impl Transactions { nats_txn, job_processor, job_queue: JobQueue::new(), + dependencies_update_component: Default::default(), } } @@ -825,6 +845,7 @@ impl Transactions { fields() )] pub async fn commit_into_conns(self) -> Result { + self.run_dependencies_update_component().await?; let pg_conn = self.pg_txn.commit_into_conn().await?; let nats_conn = self.nats_txn.commit_into_conn().await?; self.job_processor.process_queue(self.job_queue).await?; @@ -842,6 +863,7 @@ impl Transactions { fields() )] pub async fn blocking_commit_into_conns(self) -> Result { + self.run_dependencies_update_component().await?; let pg_conn = self.pg_txn.commit_into_conn().await?; let nats_conn = self.nats_txn.commit_into_conn().await?; self.job_processor @@ -873,4 +895,35 @@ impl Transactions { let _ = self.rollback_into_conns().await?; Ok(()) } + + pub async fn enqueue_dependencies_update_component( + &self, + tenancy: Tenancy, + change_set_pk: ChangeSetPk, + component_id: ComponentId, + ) { + self.dependencies_update_component + .lock() + .await + .entry((tenancy, change_set_pk)) + .or_default() + .insert(component_id); + } + + async fn run_dependencies_update_component(&self) -> Result<(), TransactionsError> { + for ((tenancy, change_set_pk), component_ids) in + std::mem::take(&mut *self.dependencies_update_component.lock().await) + { + for component_id in component_ids { + let visibility = Visibility::new(change_set_pk, None); + self.pg() + .execute( + "SELECT attribute_value_dependencies_update_component_v1($1, $2, $3)", + &[&tenancy, &visibility, &component_id], + ) + .await?; + } 
+ } + Ok(()) + } } From 0acbfad6618de87b56f47a167960a2bf7a8c0633 Mon Sep 17 00:00:00 2001 From: Scott Prutton Date: Thu, 29 Feb 2024 10:14:55 -0500 Subject: [PATCH 09/21] fix: user ids too long for aml 2023 --- bin/veritech/scripts/prepare_jailer.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bin/veritech/scripts/prepare_jailer.sh b/bin/veritech/scripts/prepare_jailer.sh index 3f1ba4df0f..1592165b1f 100755 --- a/bin/veritech/scripts/prepare_jailer.sh +++ b/bin/veritech/scripts/prepare_jailer.sh @@ -44,7 +44,7 @@ JAILER_NS="jailer-$SB_ID" # Create a user and group to run the execution via for one micro-vm function user_prep() { - useradd -M -u 10000$SB_ID $JAILER_NS + useradd -M -u 100$SB_ID $JAILER_NS usermod -L $JAILER_NS # This group was created earlier on the machine provisioning @@ -53,7 +53,7 @@ function user_prep() { usermod -a -G kvm $JAILER_NS } -if ! id 10000$SB_ID >/dev/null 2>&1; then +if ! id $JAILER_NS >/dev/null 2>&1; then retry user_prep fi From b9d6c8cf25783e87939d6f7ad4cda7f1a989b9ab Mon Sep 17 00:00:00 2001 From: Paulo Cabral Date: Thu, 29 Feb 2024 15:40:42 -0300 Subject: [PATCH 10/21] Avoid redundant work in component creation --- lib/dal/src/component.rs | 5 +- lib/dal/src/context.rs | 1 + ...dundant_work_during_component_creation.sql | 382 ++++++++++++++++++ 3 files changed, 387 insertions(+), 1 deletion(-) create mode 100644 lib/dal/src/migrations/U2612__avoid_redundant_work_during_component_creation.sql diff --git a/lib/dal/src/component.rs b/lib/dal/src/component.rs index 7d2a6ec19c..577e10fdb6 100644 --- a/lib/dal/src/component.rs +++ b/lib/dal/src/component.rs @@ -299,7 +299,7 @@ impl Component { .await? 
.pg() .query_one( - "SELECT object FROM component_create_v3($1, $2, $3, $4, $5)", + "SELECT object FROM component_create_v4($1, $2, $3, $4, $5)", &[ ctx.tenancy(), ctx.visibility(), @@ -312,6 +312,9 @@ impl Component { let component: Component = standard_model::finish_create_from_row(ctx, row).await?; + ctx.enqueue_dependencies_update_component(*component.id()) + .await?; + // Need to flesh out node so that the template data is also included in the node we // persist. But it isn't, - our node is anemic. let node = Node::new(ctx, &NodeKind::Configuration).await?; diff --git a/lib/dal/src/context.rs b/lib/dal/src/context.rs index d7af618bf2..b213f2807e 100644 --- a/lib/dal/src/context.rs +++ b/lib/dal/src/context.rs @@ -910,6 +910,7 @@ impl Transactions { .insert(component_id); } + #[instrument(level = "info", skip_all)] async fn run_dependencies_update_component(&self) -> Result<(), TransactionsError> { for ((tenancy, change_set_pk), component_ids) in std::mem::take(&mut *self.dependencies_update_component.lock().await) diff --git a/lib/dal/src/migrations/U2612__avoid_redundant_work_during_component_creation.sql b/lib/dal/src/migrations/U2612__avoid_redundant_work_during_component_creation.sql new file mode 100644 index 0000000000..40726f1e1f --- /dev/null +++ b/lib/dal/src/migrations/U2612__avoid_redundant_work_during_component_creation.sql @@ -0,0 +1,382 @@ +CREATE OR REPLACE FUNCTION component_create_v4( + this_tenancy jsonb, + this_visibility jsonb, + this_user_pk ident, + this_kind text, + this_schema_variant_id ident, + OUT object json) AS +$$ +DECLARE + this_tenancy_record tenancy_record_v1; + this_visibility_record visibility_record_v1; + this_attribute_context jsonb; + this_attribute_prototype RECORD; + this_attribute_prototypes attribute_prototypes[]; + this_attribute_values attribute_values[]; + this_attribute_value_id ident; + this_external_provider RECORD; + this_internal_provider RECORD; + this_internal_providers internal_providers[]; + 
this_new_attribute_value jsonb; + this_new_attribute_value_id ident; + this_parent_attribute_value_id ident; + this_prop_attribute_value RECORD; + this_schema_id ident; + this_unset_func_binding_id ident; + this_unset_func_binding_return_value_id ident; + this_unset_func_id ident; + this_new_row components%ROWTYPE; +BEGIN + this_tenancy_record := tenancy_json_to_columns_v1(this_tenancy); + this_visibility_record := visibility_json_to_columns_v1(this_visibility); + + INSERT INTO components (tenancy_workspace_pk, + visibility_change_set_pk, kind, creation_user_pk) + VALUES (this_tenancy_record.tenancy_workspace_pk, + this_visibility_record.visibility_change_set_pk, this_kind, + this_user_pk) + RETURNING * INTO this_new_row; + + -- Create unset AttributeValues for the ExternalProviders, InternalProviders, + -- and for the Props starting at the root prop, up until (and including) the + -- first Array/Map that is encountered. These will be place holders for + -- when we set values (such as the root.si.name), and do function evaluation + -- later on. + SELECT belongs_to_id + INTO STRICT this_schema_id + FROM schema_variant_belongs_to_schema + WHERE in_tenancy_and_visible_v1(this_tenancy, this_visibility, schema_variant_belongs_to_schema) + AND object_id = this_schema_variant_id; + + PERFORM set_belongs_to_v1( + 'component_belongs_to_schema', + this_tenancy, + this_visibility, + this_new_row.id, + this_schema_id + ); + PERFORM set_belongs_to_v1( + 'component_belongs_to_schema_variant', + this_tenancy, + this_visibility, + this_new_row.id, + this_schema_variant_id + ); + + -- Find the "si:unset" Func Binding, and Func Binding Return Value to use + -- when creating the Attribute Values for the External & Internal Providers. 
+ SELECT id
+ INTO this_unset_func_id
+ FROM find_by_attr_v1('funcs',
+ this_tenancy,
+ this_visibility,
+ 'name',
+ 'si:unset');
+ IF this_unset_func_id IS NULL THEN
+ RAISE 'component_create_v4: Unable to find Func(%) in Tenancy(%), Visibility(%)',
+ 'si:unset',
+ this_tenancy,
+ this_visibility;
+ END IF;
+ SELECT new_func_binding_id, new_func_binding_return_value_id
+ INTO this_unset_func_binding_id, this_unset_func_binding_return_value_id
+ FROM func_binding_create_and_execute_v1(
+ this_tenancy,
+ this_visibility,
+ 'null'::jsonb,
+ this_unset_func_id
+ );
+
+ -- External Providers
+ FOR this_external_provider IN
+ SELECT *
+ FROM external_providers_v1(this_tenancy, this_visibility)
+ WHERE schema_variant_id = this_schema_variant_id
+ LOOP
+ this_attribute_context := attribute_context_build_from_parts_v1(
+ ident_nil_v1(), -- Prop ID
+ ident_nil_v1(), -- Internal Provider ID
+ this_external_provider.id, -- External Provider ID
+ -- We won't find a component-specific prototype, since the component
+ -- didn't exist before calling this function, but we'll want the
+ -- component ID set when we go to create the Attribute Value.
+ this_new_row.id -- Component ID + ); + + SELECT * + INTO STRICT this_attribute_prototype + FROM attribute_prototypes_v1(this_tenancy, this_visibility) AS ap + WHERE in_attribute_context_v1(this_attribute_context, ap); + + SELECT av.object + INTO this_new_attribute_value + FROM attribute_value_create_v1( + this_tenancy, + this_visibility, + this_attribute_context, + this_unset_func_binding_id, + this_unset_func_binding_return_value_id, + NULL + ) AS av; + + PERFORM set_belongs_to_v1( + 'attribute_value_belongs_to_attribute_prototype', + this_tenancy, + this_visibility, + this_new_attribute_value ->> 'id', + this_attribute_prototype.id + ); + END LOOP; + + -- Explicit Internal Providers + FOR this_internal_provider IN + SELECT * + FROM internal_providers_v1(this_tenancy, this_visibility) + WHERE schema_variant_id = this_schema_variant_id + LOOP + this_attribute_context := attribute_context_build_from_parts_v1( + ident_nil_v1(), -- Prop ID + this_internal_provider.id, -- Internal Provider ID + ident_nil_v1(), -- External Provider ID + this_new_row.id -- Component ID + ); + + SELECT * + INTO STRICT this_attribute_prototype + FROM attribute_prototypes_v1(this_tenancy, this_visibility) AS ap + WHERE in_attribute_context_v1(this_attribute_context, ap); + + SELECT av.object + INTO this_new_attribute_value + FROM attribute_value_create_v1( + this_tenancy, + this_visibility, + this_attribute_context, + this_unset_func_binding_id, + this_unset_func_binding_return_value_id, + NULL + ) AS av; + + PERFORM set_belongs_to_v1( + 'attribute_value_belongs_to_attribute_prototype', + this_tenancy, + this_visibility, + this_new_attribute_value ->> 'id', + this_attribute_prototype.id + ); + END LOOP; + + SELECT array_agg(ip.*) + INTO STRICT this_internal_providers + FROM internal_providers_v1(this_tenancy, this_visibility) AS ip + INNER JOIN props_v1(this_tenancy, this_visibility) AS props + ON ip.prop_id = props.id + WHERE props.schema_variant_id = this_schema_variant_id; + + SELECT 
array_agg(ap.*) + INTO STRICT this_attribute_prototypes + FROM attribute_prototypes_v1(this_tenancy, this_visibility) AS ap + WHERE (ap.attribute_context_internal_provider_id = ANY (SELECT id FROM UNNEST(this_internal_providers)) + OR ap.attribute_context_prop_id = ANY (SELECT prop_id FROM UNNEST(this_internal_providers))) + AND ap.attribute_context_component_id = ident_nil_v1(); + + SELECT array_agg(av.*) + INTO STRICT this_attribute_values + FROM attribute_values_v1(this_tenancy, this_visibility) AS av + WHERE av.attribute_context_prop_id = ANY (SELECT prop_id FROM UNNEST(this_internal_providers)) + AND av.attribute_context_component_id = ident_nil_v1(); + + -- Implicit Internal Providers + FOREACH this_internal_provider IN ARRAY this_internal_providers + LOOP + -- Create an Attribute Value for the Internal Provider + this_attribute_context := attribute_context_build_from_parts_v1( + ident_nil_v1(), -- Prop ID + this_internal_provider.id, -- Internal Provider ID + ident_nil_v1(), -- External Provider ID + this_new_row.id -- Component ID + ); + + SELECT * + INTO STRICT this_attribute_prototype + FROM UNNEST(this_attribute_prototypes) as ap + WHERE in_attribute_context_v1(this_attribute_context, ap); + + SELECT av.object + INTO this_new_attribute_value + FROM attribute_value_create_v1( + this_tenancy, + this_visibility, + this_attribute_context, + this_unset_func_binding_id, + this_unset_func_binding_return_value_id, + NULL + ) AS av; + + PERFORM set_belongs_to_v1( + 'attribute_value_belongs_to_attribute_prototype', + this_tenancy, + this_visibility, + this_new_attribute_value ->> 'id', + this_attribute_prototype.id + ); + + -- Create an Attribute Value for the Prop. 
+ this_attribute_context := attribute_context_build_from_parts_v1( + this_internal_provider.prop_id, -- Prop ID + ident_nil_v1(), -- Internal Provider ID + ident_nil_v1(), -- External Provider ID + this_new_row.id -- Component ID + ); + + SELECT * + INTO STRICT this_attribute_prototype + FROM UNNEST(this_attribute_prototypes) as ap + WHERE in_attribute_context_v1(this_attribute_context, ap) + LIMIT 1; + + -- See what the func_binding & func_binding_return_value are on the + -- prop-specific Attribute Value, and copy those over. + SELECT * + INTO STRICT this_prop_attribute_value + FROM UNNEST(this_attribute_values) as av + WHERE in_attribute_context_v1( + attribute_context_build_from_parts_v1( + this_internal_provider.prop_id, + ident_nil_v1(), + ident_nil_v1(), + ident_nil_v1() + ), + av + ); + + SELECT av.object + INTO this_new_attribute_value + FROM attribute_value_create_v1( + this_tenancy, + this_visibility, + this_attribute_context, + this_prop_attribute_value.func_binding_id, + this_prop_attribute_value.func_binding_return_value_id, + NULL + ) AS av; + + PERFORM set_belongs_to_v1( + 'attribute_value_belongs_to_attribute_prototype', + this_tenancy, + this_visibility, + this_new_attribute_value ->> 'id', + this_attribute_prototype.id + ); + END LOOP; + + -- Some map Props have entries for specific keys as part of the Schema + -- Variant's definition. This should only be happening for things like + -- qualifications, and code-gen, which means that it should only ever be + -- happening for the first-level map encountered from the root, when it + -- happens at all. 
+ FOR this_prop_attribute_value IN + SELECT av.* + FROM attribute_values_v1(this_tenancy, this_visibility) AS av + INNER JOIN props_v1(this_tenancy, this_visibility) AS props + ON av.attribute_context_prop_id = props.id + WHERE props.schema_variant_id = this_schema_variant_id + AND av.key IS NOT NULL + AND av.attribute_context_component_id = ident_nil_v1() + LOOP + this_attribute_context := attribute_context_build_from_parts_v1( + this_prop_attribute_value.attribute_context_prop_id, + ident_nil_v1(), + ident_nil_v1(), + this_new_row.id + ); + + SELECT ap.* + INTO STRICT this_attribute_prototype + FROM attribute_prototypes_v1(this_tenancy, this_visibility) AS ap + INNER JOIN attribute_value_belongs_to_attribute_prototype_v1(this_tenancy, this_visibility) AS avbtap + ON ap.id = avbtap.belongs_to_id + WHERE avbtap.object_id = this_prop_attribute_value.id; + + SELECT av.object + INTO this_new_attribute_value + FROM attribute_value_create_v1( + this_tenancy, + this_visibility, + this_attribute_context, + this_prop_attribute_value.func_binding_id, + this_prop_attribute_value.func_binding_return_value_id, + this_prop_attribute_value.key + ) AS av; + + PERFORM set_belongs_to_v1( + 'attribute_value_belongs_to_attribute_prototype', + this_tenancy, + this_visibility, + this_new_attribute_value ->> 'id', + this_attribute_prototype.id + ); + END LOOP; + + -- We need to create the attribute_value_belongs_to_attribute_value + -- relationship for the Prop Attribute Values of the Component. We are doing + -- this after all of the Attribute Values have been created because we're + -- guaranteeing that they're created in topological order, which prevents + -- us from setting the belongs_to relationship as we go along. 
+ this_attribute_context := attribute_context_build_from_parts_v1( + NULL, -- Prop ID + ident_nil_v1(), -- Internal Provider ID + ident_nil_v1(), -- External Provider ID + this_new_row.id -- Component ID + ); + FOR this_parent_attribute_value_id, this_attribute_value_id IN + WITH RECURSIVE avbtav(parent_av_id, av_id) AS ( + SELECT parent_av.id, av.id + FROM prop_belongs_to_prop AS pbtp + INNER JOIN attribute_values_v1(this_tenancy, this_visibility) AS av + ON av.attribute_context_prop_id = pbtp.object_id + INNER JOIN attribute_values_v1(this_tenancy, this_visibility) AS parent_av + ON parent_av.attribute_context_prop_id = pbtp.belongs_to_id + WHERE in_attribute_context_v1(this_attribute_context, av) + AND av.attribute_context_component_id = this_new_row.id + AND in_attribute_context_v1(this_attribute_context, parent_av) + AND parent_av.attribute_context_component_id = this_new_row.id + ) + SELECT * FROM avbtav + LOOP + PERFORM set_belongs_to_v1( + 'attribute_value_belongs_to_attribute_value', + this_tenancy, + this_visibility, + this_attribute_value_id, + this_parent_attribute_value_id + ); + END LOOP; + + -- -- Make sure we've populated the dependency graph for this (new) component. 
+ -- FOR this_new_attribute_value_id IN + -- SELECT av.id + -- FROM attribute_values_v1(this_tenancy, this_visibility) AS av + -- WHERE av.attribute_context_component_id = this_new_row.id + -- LOOP + -- PERFORM attribute_value_dependencies_update_v1( + -- this_tenancy_record.tenancy_workspace_pk, + -- this_visibility_record.visibility_change_set_pk, + -- this_visibility_record.visibility_deleted_at, + -- this_new_attribute_value_id + -- ); + -- END LOOP; + + -- Create a parallel record to store creation and update status, meaning that this table's id refers to components.id + INSERT INTO component_statuses (id, + tenancy_workspace_pk, + visibility_change_set_pk, + creation_user_pk, update_user_pk) + VALUES (this_new_row.id, + this_new_row.tenancy_workspace_pk, + this_new_row.visibility_change_set_pk, + this_user_pk, this_user_pk); + + object := row_to_json(this_new_row); +END; +$$ LANGUAGE PLPGSQL VOLATILE; From e51d3abbc70a0a8f697ee6b3f60172a07cad3b8e Mon Sep 17 00:00:00 2001 From: Scott Prutton Date: Thu, 29 Feb 2024 13:41:55 -0500 Subject: [PATCH 11/21] fix: need both uids to match --- bin/veritech/scripts/prepare_jailer.sh | 4 ++-- lib/deadpool-cyclone/src/instance/cyclone/local_uds.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bin/veritech/scripts/prepare_jailer.sh b/bin/veritech/scripts/prepare_jailer.sh index 1592165b1f..a2d0b1bc7a 100755 --- a/bin/veritech/scripts/prepare_jailer.sh +++ b/bin/veritech/scripts/prepare_jailer.sh @@ -44,7 +44,7 @@ JAILER_NS="jailer-$SB_ID" # Create a user and group to run the execution via for one micro-vm function user_prep() { - useradd -M -u 100$SB_ID $JAILER_NS + useradd -M -u 30$SB_ID $JAILER_NS usermod -L $JAILER_NS # This group was created earlier on the machine provisioning @@ -53,7 +53,7 @@ function user_prep() { usermod -a -G kvm $JAILER_NS } -if ! id $JAILER_NS >/dev/null 2>&1; then +if ! 
id 30$SB_ID >/dev/null 2>&1; then retry user_prep fi diff --git a/lib/deadpool-cyclone/src/instance/cyclone/local_uds.rs b/lib/deadpool-cyclone/src/instance/cyclone/local_uds.rs index 24970fe01a..0c4139b954 100644 --- a/lib/deadpool-cyclone/src/instance/cyclone/local_uds.rs +++ b/lib/deadpool-cyclone/src/instance/cyclone/local_uds.rs @@ -842,7 +842,7 @@ impl LocalFirecrackerRuntime { .arg("--exec-file") .arg("/usr/bin/firecracker") .arg("--uid") - .arg(format!("10000{}", vm_id)) + .arg(format!("30{}", vm_id)) .arg("--gid") .arg("10000") .arg("--netns") From 841dc5092d0fc7898791ba670b9367c72895573f Mon Sep 17 00:00:00 2001 From: Victor Bustamante Date: Thu, 29 Feb 2024 10:55:22 -0800 Subject: [PATCH 12/21] fix(dal,sdf): Delete components on summary immediately Signed-off-by: Victor Bustamante Co-authored-by: Adam Jacob Co-authored-by: Nick Stinemates --- lib/dal/src/component.rs | 18 ++- ...13__fix_component_delete_and_propagate.sql | 132 ++++++++++++++++++ .../service/diagram/delete_component.rs | 17 +-- 3 files changed, 152 insertions(+), 15 deletions(-) create mode 100644 lib/dal/src/migrations/U2613__fix_component_delete_and_propagate.sql diff --git a/lib/dal/src/component.rs b/lib/dal/src/component.rs index 577e10fdb6..262a0403fb 100644 --- a/lib/dal/src/component.rs +++ b/lib/dal/src/component.rs @@ -954,6 +954,8 @@ impl Component { } pub async fn delete_and_propagate(&mut self, ctx: &DalContext) -> ComponentResult<()> { + let deletion_time = Utc::now(); + // Block deletion of frames with children if self.get_type(ctx).await? != ComponentType::Component { let connected_children = Edge::list_children_for_component(ctx, self.id).await?; @@ -963,7 +965,7 @@ impl Component { } } - self.set_deleted_at(ctx, Some(Utc::now())).await?; + self.set_deleted_at(ctx, Some(deletion_time)).await?; if self.get_protected(ctx).await? { return Err(ComponentError::ComponentProtected(self.id)); @@ -980,7 +982,7 @@ impl Component { .await? 
.pg() .query( - "SELECT * FROM component_delete_and_propagate_v1($1, $2, $3, $4, $5)", + "SELECT * FROM component_delete_and_propagate_v3($1, $2, $3, $4, $5)", &[ ctx.tenancy(), ctx.visibility(), @@ -1000,6 +1002,18 @@ impl Component { ctx.enqueue_dependent_values_update(ids).await?; + diagram::summary_diagram::component_update( + ctx, + self.id(), + self.name(ctx).await?, + self.color(ctx).await?.unwrap_or_default(), + self.get_type(ctx).await?, + self.resource(ctx).await?.payload.is_some(), + Some(deletion_time.to_string()), + ) + .await + .map_err(|e| ComponentError::SummaryDiagram(e.to_string()))?; + Ok(()) } diff --git a/lib/dal/src/migrations/U2613__fix_component_delete_and_propagate.sql b/lib/dal/src/migrations/U2613__fix_component_delete_and_propagate.sql new file mode 100644 index 0000000000..481b343c34 --- /dev/null +++ b/lib/dal/src/migrations/U2613__fix_component_delete_and_propagate.sql @@ -0,0 +1,132 @@ +CREATE OR REPLACE FUNCTION component_delete_and_propagate_v3( + this_tenancy jsonb, + this_visibility jsonb, + this_component_id ident, + this_user_pk ident, + this_has_resource boolean +) + RETURNS TABLE + ( + object json + ) +AS +$$ +DECLARE + deleted_timestamp timestamp with time zone; + external_provider_id ident; + internal_provider_id ident; + peer_component_id ident; + table_name text; + target_id ident; + this_peer_component_ids ident[]; + this_component_av_id ident; +BEGIN + -- Outgoing Edges + FOR target_id, peer_component_id, internal_provider_id, external_provider_id IN + SELECT e.id, e.head_object_id, sbtip.belongs_to_id, sbtep.belongs_to_id + FROM edges_v1(this_tenancy, this_visibility) e + LEFT JOIN socket_belongs_to_internal_provider_v1(this_tenancy, this_visibility) sbtip + ON sbtip.object_id = e.head_socket_id + LEFT JOIN socket_belongs_to_external_provider_v1(this_tenancy, this_visibility) sbtep + ON sbtep.object_id = e.head_socket_id + WHERE e.tail_object_id = this_component_id + LOOP + SELECT delete_by_id_v1('edges', this_tenancy, 
this_visibility, target_id) INTO deleted_timestamp; + + -- We have to get the edge head values so we can update them after edge deletion + RETURN QUERY SELECT row_to_json(av.*) AS object + FROM attribute_values_v1(this_tenancy, this_visibility) av + WHERE attribute_context_component_id = peer_component_id + AND (attribute_context_internal_provider_id = internal_provider_id OR + attribute_context_external_provider_id = external_provider_id); + SELECT array_agg(av.attribute_context_component_id) + INTO this_peer_component_ids + FROM attribute_values_v1(this_tenancy, this_visibility) av + WHERE attribute_context_component_id = peer_component_id + AND (attribute_context_internal_provider_id = internal_provider_id OR + attribute_context_external_provider_id = external_provider_id); + + PERFORM update_by_id_v1('edges', + 'deleted_implicitly', + this_tenancy, + this_visibility || jsonb_build_object('visibility_deleted_at', deleted_timestamp), + target_id, + true); + END LOOP; + + FOR target_id, table_name IN + SELECT id, 'edges' as table_name -- Incoming Edges + FROM edges_v1(this_tenancy, this_visibility) + WHERE head_object_id = this_component_id + LOOP + -- In the future, we'll possibly want to deal differently with edges that don't exist on HEAD vs the ones that do + -- we don't make that distinction right now + SELECT delete_by_id_v1(table_name, this_tenancy, this_visibility, target_id) INTO deleted_timestamp; + + PERFORM update_by_id_v1('edges', + 'deleted_implicitly', + this_tenancy, + this_visibility || jsonb_build_object('visibility_deleted_at', deleted_timestamp), + target_id, + true); + END LOOP; + + FOR target_id, table_name IN + SELECT nbtc.object_id, 'nodes' as table_name + FROM node_belongs_to_component_v1(this_tenancy, this_visibility) nbtc + WHERE nbtc.belongs_to_id = this_component_id + UNION + SELECT nbtc.id, 'node_belongs_to_component' as table_name + FROM node_belongs_to_component_v1(this_tenancy, this_visibility) nbtc + WHERE nbtc.belongs_to_id = 
this_component_id + LOOP + PERFORM delete_by_id_v1(table_name, this_tenancy, this_visibility, target_id); + END LOOP; + + SELECT delete_by_id_v1('components', this_tenancy, this_visibility, this_component_id) INTO deleted_timestamp; + + -- Remove the deleted Component's AttributeValues from the dependency graph. + FOR this_component_av_id IN + SELECT av.id + FROM attribute_values_v1(this_tenancy, this_visibility) AS av + WHERE av.attribute_context_component_id = this_component_id + LOOP + PERFORM attribute_value_dependencies_update_v1( + (this_tenancy ->> 'tenancy_workspace_pk')::ident, + (this_visibility ->> 'visibility_change_set_pk')::ident, + deleted_timestamp::timestamptz, + this_component_av_id + ); + END LOOP; + + IF this_peer_component_ids IS NULL THEN + this_peer_component_ids := '{}'; + end if; + + -- Update the dependencies of all Components that used this one as an input + FOREACH peer_component_id IN ARRAY this_peer_component_ids + LOOP + PERFORM attribute_value_dependencies_update_component_v1( + this_tenancy, + this_visibility, + peer_component_id + ); + END LOOP; + + -- Mark the component as needing destruction + PERFORM update_by_id_v1('components', + 'needs_destroy', + this_tenancy, + this_visibility || jsonb_build_object('visibility_deleted_at', deleted_timestamp), + this_component_id, + this_has_resource); + + -- Ensure we now set the actor of who has deleted the component + PERFORM update_by_id_v1('components', + 'deletion_user_pk', + this_tenancy, + this_visibility || jsonb_build_object('visibility_deleted_at', deleted_timestamp), + this_component_id, + this_user_pk); +END; +$$ LANGUAGE PLPGSQL STABLE; diff --git a/lib/sdf-server/src/server/service/diagram/delete_component.rs b/lib/sdf-server/src/server/service/diagram/delete_component.rs index 516f052da2..b4c4c3e6b3 100644 --- a/lib/sdf-server/src/server/service/diagram/delete_component.rs +++ b/lib/sdf-server/src/server/service/diagram/delete_component.rs @@ -136,24 +136,15 @@ pub async fn 
delete_components( ) -> DiagramResult { let mut ctx = builder.build(request_ctx.build(request.visibility)).await?; - let mut force_changeset_pk = None; - if ctx.visibility().is_head() { - let change_set = ChangeSet::new(&ctx, ChangeSet::generate_name(), None).await?; - - let new_visibility = Visibility::new(change_set.pk, request.visibility.deleted_at); - - ctx.update_visibility(new_visibility); + let force_changeset_pk = ChangeSet::force_new(&mut ctx).await?; - force_changeset_pk = Some(change_set.pk); + for component_id in request.component_ids { + delete_single_component(&ctx, component_id, &original_uri, &posthog_client).await?; - WsEvent::change_set_created(&ctx, change_set.pk) + WsEvent::component_updated(&ctx, component_id) .await? .publish_on_commit(&ctx) .await?; - }; - - for component_id in request.component_ids { - delete_single_component(&ctx, component_id, &original_uri, &posthog_client).await?; } ctx.commit().await?; From 5cd6d070740f39fdc3344a65835e6686691a3588 Mon Sep 17 00:00:00 2001 From: Victor Bustamante Date: Thu, 29 Feb 2024 13:15:16 -0800 Subject: [PATCH 13/21] fix(dal): Migrate deleted component summary data Signed-off-by: Victor Bustamante Co-authored-by: Paul Stack Co-authored-by: John Watson Co-authored-by: Scott Prutton Co-authored-by: Nick Stinemates --- ..._correct_deleted_components_on_summary.sql | 59 +++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 lib/dal/src/migrations/U2614__correct_deleted_components_on_summary.sql diff --git a/lib/dal/src/migrations/U2614__correct_deleted_components_on_summary.sql b/lib/dal/src/migrations/U2614__correct_deleted_components_on_summary.sql new file mode 100644 index 0000000000..83269563ae --- /dev/null +++ b/lib/dal/src/migrations/U2614__correct_deleted_components_on_summary.sql @@ -0,0 +1,59 @@ +-- This exists to fix bad summaries for deleted components pre migration U2613 + +-- Created and deleted on the same changeset +-- if there is a component with deleted not null and 
a fully matching (id, changeset, workspace) summary, +-- update summary with deletion data +UPDATE summary_diagram_components sc SET + visibility_deleted_at = c.visibility_deleted_at, + change_status = 'deleted', + deleted_info = jsonb_build_object( + 'actor', jsonb_build_object('kind', 'system', 'label', 'migration 2614'), + 'timestamp', now() + ) +FROM components c +WHERE c.visibility_change_set_pk = sc.visibility_change_set_pk + AND c.tenancy_workspace_pk = sc.tenancy_workspace_pk + AND c.tenancy_workspace_pk != ident_nil_v1() + AND c.id = sc.component_id + AND c.visibility_deleted_at IS NOT NULL; + +-- Things got merged and deleted on changeset +-- There's no summary entry on the changeset the component got deleted, so we insert it +INSERT INTO summary_diagram_components (id, tenancy_workspace_pk, visibility_change_set_pk, + visibility_deleted_at, created_at, component_id, + display_name, node_id, schema_name, + schema_id, schema_variant_id, + schema_variant_name, schema_category, position, size, color, node_type, + change_status, has_resource, created_info, updated_info, deleted_info, + sockets, parent_node_id, child_node_ids) +SELECT + head_sc.id, head_sc.tenancy_workspace_pk, c.visibility_change_set_pk, + c.visibility_deleted_at, head_sc.created_at, head_sc.component_id, + head_sc.display_name, head_sc.node_id, head_sc.schema_name, + head_sc.schema_id, head_sc.schema_variant_id, + head_sc.schema_variant_name, head_sc.schema_category, head_sc.position, head_sc.size, head_sc.color, head_sc.node_type, + 'deleted', head_sc.has_resource, head_sc.created_info, head_sc.updated_info, + jsonb_build_object( -- deleted info + 'actor', jsonb_build_object('kind', 'system', 'label', 'migration 2614'), + 'timestamp', now() + ), + head_sc.sockets, head_sc.parent_node_id, head_sc.child_node_ids +FROM components c + -- This + `filter_sc.id IS NULL` at the end is an outer left join, which means + -- only get components that don't have a fully matching summary + LEFT JOIN 
summary_diagram_components filter_sc ON + c.id = filter_sc.component_id AND + c.visibility_change_set_pk = filter_sc.visibility_change_set_pk AND + c.tenancy_workspace_pk = filter_sc.tenancy_workspace_pk + -- get matching head summaries (which always exist in this case) + JOIN summary_diagram_components head_sc ON + c.id = head_sc.component_id AND + head_sc.visibility_change_set_pk = ident_nil_v1() AND + c.tenancy_workspace_pk = head_sc.tenancy_workspace_pk + -- Do this for deleted components with no head version + WHERE c.visibility_deleted_at IS NOT NULL + AND c.visibility_change_set_pk != ident_nil_v1() + AND c.tenancy_workspace_pk != ident_nil_v1() + AND filter_sc.id IS NULL; + + From 0a4d9a88fb9d19f397ec9b66f7f194bb49c0d88c Mon Sep 17 00:00:00 2001 From: Scott Prutton Date: Fri, 1 Mar 2024 10:41:39 -0500 Subject: [PATCH 14/21] fix: change log formatting to text to make grafana logs readable --- component/init/configs/vector.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/component/init/configs/vector.yaml b/component/init/configs/vector.yaml index 41e24f6e7c..723a9f9076 100644 --- a/component/init/configs/vector.yaml +++ b/component/init/configs/vector.yaml @@ -12,7 +12,7 @@ sinks: inputs: ["$SI_SERVICE-journal"] compression: "gzip" encoding: - codec: "json" + codec: "text" region: "us-east-1" group_name: "/ec2/$SI_HOSTENV-$SI_SERVICE" stream_name: "{{ host }}" From 157a667bb85adeba7be5072b2201c4f13451144e Mon Sep 17 00:00:00 2001 From: Paulo Cabral Date: Fri, 1 Mar 2024 16:46:29 -0300 Subject: [PATCH 15/21] Check has_resource from summary in list actions --- lib/dal/src/action.rs | 20 ++++++++++++------- lib/dal/src/schema/variant/root_prop.rs | 2 +- .../service/change_set/list_queued_actions.rs | 8 +++----- 3 files changed, 17 insertions(+), 13 deletions(-) diff --git a/lib/dal/src/action.rs b/lib/dal/src/action.rs index 76b76353d9..3991d53b01 100644 --- a/lib/dal/src/action.rs +++ b/lib/dal/src/action.rs @@ -6,6 +6,7 @@ use 
si_data_pg::PgError; use telemetry::prelude::*; use crate::{ + diagram::summary_diagram::{SummaryDiagramComponent, SummaryDiagramError}, impl_standard_model, pk, standard_model, standard_model_accessor_ro, ActionKind, ActionPrototype, ActionPrototypeError, ActionPrototypeId, ChangeSetPk, Component, ComponentError, ComponentId, DalContext, HistoryActor, HistoryEventError, Node, NodeError, @@ -38,6 +39,8 @@ pub enum ActionError { PrototypeNotFound(ActionPrototypeId), #[error("standard model error: {0}")] StandardModelError(#[from] StandardModelError), + #[error("summary diagram error: {0}")] + SummaryDiagram(#[from] SummaryDiagramError), #[error("transactions error: {0}")] Transactions(#[from] TransactionsError), #[error(transparent)] @@ -209,7 +212,13 @@ impl Action { .flatten() .map(|a| *a.id()) }; - let resource = component.resource(ctx).await?.payload; + let has_resource = if let Some(summary) = + SummaryDiagramComponent::get_for_component_id(ctx, *component.id()).await? + { + summary.has_resource() + } else { + component.resource(ctx).await?.payload.is_some() + }; // Figure out internal dependencies for actions of this component // @@ -227,7 +236,7 @@ impl Action { // Initial deletions happen if there is a resource and a create action, so it deletes before creating match kind { ActionKind::Create => { - if resource.is_some() { + if has_resource { let ids = action_ids_by_kind(ActionKind::Delete); actions_graph .entry(*action.id()) @@ -238,8 +247,7 @@ impl Action { } ActionKind::Delete => { // If there is a resource and a create, this is a initial deletion, so no parent - if resource.is_none() - || action_ids_by_kind(ActionKind::Create).count() == 0 + if !has_resource || action_ids_by_kind(ActionKind::Create).count() == 0 { // Every other action kind is a parent let ids = actions_by_kind @@ -256,9 +264,7 @@ impl Action { } ActionKind::Refresh | ActionKind::Other => { // If there is a resource and a create, delete actions will be initial, so our parent - if 
resource.is_some() - && action_ids_by_kind(ActionKind::Create).count() > 0 - { + if has_resource && action_ids_by_kind(ActionKind::Create).count() > 0 { let ids = action_ids_by_kind(ActionKind::Delete); actions_graph .entry(*action.id()) diff --git a/lib/dal/src/schema/variant/root_prop.rs b/lib/dal/src/schema/variant/root_prop.rs index 01ef66e218..7155e1cc78 100644 --- a/lib/dal/src/schema/variant/root_prop.rs +++ b/lib/dal/src/schema/variant/root_prop.rs @@ -16,7 +16,7 @@ pub mod component_type; /// This enum contains the subtree names for every direct child [`Prop`](crate::Prop) of /// [`RootProp`](RootProp). Not all children will be of the same [`PropKind`](crate::PropKind). #[remain::sorted] -#[derive(AsRefStr, EnumIter, EnumString, EnumDisplay)] +#[derive(AsRefStr, EnumIter, EnumString, EnumDisplay, Debug)] pub enum RootPropChild { /// Corresponds to the "/root/code" subtree. Code, diff --git a/lib/sdf-server/src/server/service/change_set/list_queued_actions.rs b/lib/sdf-server/src/server/service/change_set/list_queued_actions.rs index ae01f49cb5..3abcb1e65a 100644 --- a/lib/sdf-server/src/server/service/change_set/list_queued_actions.rs +++ b/lib/sdf-server/src/server/service/change_set/list_queued_actions.rs @@ -37,17 +37,15 @@ pub struct ListQueuedActionsResponse { pub async fn list_queued_actions( HandlerContext(builder): HandlerContext, - AccessBuilder(access_builder): AccessBuilder, + AccessBuilder(request_ctx): AccessBuilder, Query(request): Query, ) -> ChangeSetResult> { - let ctx = builder.build_head(access_builder).await?; + let ctx = builder.build(request_ctx.build(request.visibility)).await?; - let change_set = ChangeSet::get_by_pk(&ctx, &request.visibility.change_set_pk) + let change_set = ChangeSet::get_by_pk(&ctx, &ctx.visibility().change_set_pk) .await? 
.ok_or(ChangeSetError::ChangeSetNotFound)?; - let ctx = - ctx.clone_with_new_visibility(Visibility::new(change_set.pk, ctx.visibility().deleted_at)); let mut actions = HashMap::new(); for ( _, From 4e3e9e075adea0a217fb532008427ebd23732716 Mon Sep 17 00:00:00 2001 From: John Watson Date: Fri, 1 Mar 2024 22:08:54 +0000 Subject: [PATCH 16/21] feat(cypress): synthetic disabled --- .github/workflows/e2e-validation.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/e2e-validation.yml b/.github/workflows/e2e-validation.yml index 09b43885ca..1bbb779831 100644 --- a/.github/workflows/e2e-validation.yml +++ b/.github/workflows/e2e-validation.yml @@ -2,8 +2,8 @@ name: Cypress E2E Tests on: workflow_dispatch: - schedule: - - cron: '*/5 * * * *' # Runs every 5 minutes +# schedule: +# - cron: '*/5 * * * *' # Runs every 5 minutes jobs: cypress-tests: From 3e00388c6e1396949dadf6ee2b49547431431a94 Mon Sep 17 00:00:00 2001 From: wendybujalski Date: Thu, 29 Feb 2024 17:03:46 -0500 Subject: [PATCH 17/21] feat(web) - AttributesPanelItem header source select dropdown mock --- .../AttributesPanel/AttributesPanel.vue | 6 + .../AttributesPanel/AttributesPanelItem.vue | 278 ++++++++++++------ .../components/layout/navbar/NavbarButton.vue | 2 +- app/web/src/main.ts | 10 +- 4 files changed, 205 insertions(+), 91 deletions(-) diff --git a/app/web/src/components/AttributesPanel/AttributesPanel.vue b/app/web/src/components/AttributesPanel/AttributesPanel.vue index 279a7f2241..7233ab8c5c 100644 --- a/app/web/src/components/AttributesPanel/AttributesPanel.vue +++ b/app/web/src/components/AttributesPanel/AttributesPanel.vue @@ -350,4 +350,10 @@ provide(AttributesPanelContextInjectionKey, { z-index: 3; } } + +.picker_editor input { + body.dark & { + color: white; + } +} diff --git a/app/web/src/components/AttributesPanel/AttributesPanelItem.vue b/app/web/src/components/AttributesPanel/AttributesPanelItem.vue index 087478d619..c7f2527c57 100644 --- 
a/app/web/src/components/AttributesPanel/AttributesPanelItem.vue +++ b/app/web/src/components/AttributesPanel/AttributesPanelItem.vue @@ -11,12 +11,13 @@ '--collapsed': canHaveChildren && !isOpen, }" > - +
+
@@ -50,18 +51,24 @@ :name="icon" size="none" /> -
- - - - - +
+ + + +
+ +
@@ -74,20 +81,8 @@ - +
- -
- - -
+ + +
+
+
- -
- + This prop does not currently have any children. +
--> - +
@@ -213,13 +243,6 @@ class="attributes-panel-item__help-icon" /> --> -
- -
-