diff --git a/catalog/resource_catalog.go b/catalog/resource_catalog.go index 3c5bffaf0e..e02f479f26 100644 --- a/catalog/resource_catalog.go +++ b/catalog/resource_catalog.go @@ -124,6 +124,9 @@ func ResourceCatalog() common.Resource { if err != nil { return err } + if !d.HasChangeExcept("force_destroy") { + return nil + } var updateCatalogRequest catalog.UpdateCatalog common.DataToStructPointer(d, catalogSchema, &updateCatalogRequest) @@ -143,6 +146,10 @@ func ResourceCatalog() common.Resource { return nil } + if d.HasChange("comment") && updateCatalogRequest.Comment == "" { + updateCatalogRequest.ForceSendFields = append(updateCatalogRequest.ForceSendFields, "Comment") + } + updateCatalogRequest.Owner = "" ci, err := w.Catalogs.Update(ctx, updateCatalogRequest) diff --git a/catalog/resource_catalog_test.go b/catalog/resource_catalog_test.go index e97f40ba5f..04f8e14206 100644 --- a/catalog/resource_catalog_test.go +++ b/catalog/resource_catalog_test.go @@ -227,6 +227,67 @@ func TestUpdateCatalog(t *testing.T) { }.ApplyNoError(t) } +func TestUpdateCatalogSetEmptyComment(t *testing.T) { + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + w.GetMockMetastoresAPI().EXPECT().Current(mock.Anything).Return(&catalog.MetastoreAssignment{ + MetastoreId: "d", + }, nil) + e := w.GetMockCatalogsAPI().EXPECT() + e.Update(mock.Anything, catalog.UpdateCatalog{ + Name: "a", + Comment: "", + ForceSendFields: []string{"Comment"}, + }).Return(&catalog.CatalogInfo{ + Name: "a", + Comment: "", + }, nil) + e.GetByName(mock.Anything, "a").Return(&catalog.CatalogInfo{ + Name: "a", + Comment: "", + }, nil) + }, + Resource: ResourceCatalog(), + Update: true, + ID: "a", + InstanceState: map[string]string{ + "metastore_id": "d", + "name": "a", + "comment": "c", + }, + HCL: ` + name = "a" + comment = "" + `, + }.ApplyNoError(t) +} + +func TestUpdateCatalogForceDestroyOnly(t *testing.T) { + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + w.GetMockMetastoresAPI().EXPECT().Current(mock.Anything).Return(&catalog.MetastoreAssignment{ + MetastoreId: "d", + }, nil) + e := w.GetMockCatalogsAPI().EXPECT() + e.GetByName(mock.Anything, "a").Return(&catalog.CatalogInfo{ + Name: "a", + }, nil) + }, + Resource: ResourceCatalog(), + Update: true, + ID: "a", + InstanceState: map[string]string{ + "metastore_id": "d", + "name": "a", + "force_destroy": "true", + }, + HCL: ` + name = "a" + force_destroy = false + `, + }.ApplyNoError(t) +} + func TestUpdateCatalogOwnerOnly(t *testing.T) { qa.ResourceFixture{ MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { diff --git a/catalog/resource_quality_monitor.go b/catalog/resource_quality_monitor.go index 1d2beffc4e..c385a83a9e 100644 --- a/catalog/resource_quality_monitor.go +++ b/catalog/resource_quality_monitor.go @@ -100,6 +100,9 @@ func ResourceQualityMonitor() common.Resource { var update catalog.UpdateMonitor common.DataToStructPointer(d, monitorSchema, &update) update.TableName = d.Get("table_name").(string) + if update.Schedule != nil { + update.Schedule.PauseStatus = "" + } _, err = w.QualityMonitors.Update(ctx, update) if err != nil { return err diff --git a/catalog/resource_registered_model.go b/catalog/resource_registered_model.go index 0b0e24de59..9a211a7047 100644 --- a/catalog/resource_registered_model.go +++ b/catalog/resource_registered_model.go @@ -92,6 +92,9 @@ func ResourceRegisteredModel() common.Resource { return nil } + if d.HasChange("comment") && u.Comment == "" { + u.ForceSendFields = 
append(u.ForceSendFields, "Comment") + } u.Owner = "" _, err = w.RegisteredModels.Update(ctx, u) if err != nil { diff --git a/catalog/resource_registered_model_test.go b/catalog/resource_registered_model_test.go index 712abaf666..8a698b64ff 100644 --- a/catalog/resource_registered_model_test.go +++ b/catalog/resource_registered_model_test.go @@ -196,6 +196,47 @@ func TestRegisteredModelUpdate(t *testing.T) { }.ApplyNoError(t) } +func TestRegisteredModelUpdateCommentOnly(t *testing.T) { + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + e := w.GetMockRegisteredModelsAPI().EXPECT() + e.Update(mock.Anything, catalog.UpdateRegisteredModelRequest{ + FullName: "catalog.schema.model", + Comment: "", + ForceSendFields: []string{"Comment"}, + }).Return(&catalog.RegisteredModelInfo{ + Name: "model", + CatalogName: "catalog", + SchemaName: "schema", + FullName: "catalog.schema.model", + Comment: "", + }, nil) + e.GetByFullName(mock.Anything, "catalog.schema.model").Return(&catalog.RegisteredModelInfo{ + Name: "model", + CatalogName: "catalog", + SchemaName: "schema", + FullName: "catalog.schema.model", + Comment: "", + }, nil) + }, + Resource: ResourceRegisteredModel(), + Update: true, + ID: "catalog.schema.model", + InstanceState: map[string]string{ + "name": "model", + "catalog_name": "catalog", + "schema_name": "schema", + "comment": "comment", + }, + HCL: ` + name = "model" + catalog_name = "catalog" + schema_name = "schema" + comment = "" + `, + }.ApplyNoError(t) +} + func TestRegisteredModelUpdateOwner(t *testing.T) { qa.ResourceFixture{ MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { diff --git a/catalog/resource_schema.go b/catalog/resource_schema.go index 9a0ae258a0..21234a9a28 100644 --- a/catalog/resource_schema.go +++ b/catalog/resource_schema.go @@ -100,6 +100,9 @@ func ResourceSchema() common.Resource { if err != nil { return err } + if !d.HasChangeExcept("force_destroy") { + return nil + } var updateSchemaRequest catalog.UpdateSchema common.DataToStructPointer(d, s, &updateSchemaRequest) updateSchemaRequest.FullName = d.Id() @@ -118,6 +121,10 @@ func ResourceSchema() common.Resource { return nil } + if d.HasChange("comment") && updateSchemaRequest.Comment == "" { + updateSchemaRequest.ForceSendFields = append(updateSchemaRequest.ForceSendFields, "Comment") + } + updateSchemaRequest.Owner = "" schema, err := w.Schemas.Update(ctx, updateSchemaRequest) if err != nil { diff --git a/catalog/resource_schema_test.go b/catalog/resource_schema_test.go index cdbd9ad409..58e1060296 100644 --- a/catalog/resource_schema_test.go +++ b/catalog/resource_schema_test.go @@ -135,6 +135,77 @@ func TestUpdateSchema(t *testing.T) { }.ApplyNoError(t) } +func TestUpdateSchemaSetEmptyComment(t *testing.T) { + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + w.GetMockMetastoresAPI().EXPECT().Current(mock.Anything).Return(&catalog.MetastoreAssignment{ + MetastoreId: "d", + }, nil) + e := w.GetMockSchemasAPI().EXPECT() + e.Update(mock.Anything, catalog.UpdateSchema{ + FullName: "b.a", + Comment: "", + ForceSendFields: []string{"Comment"}, + }).Return(&catalog.SchemaInfo{ + FullName: "b.a", + Owner: "administrators", + }, nil) + e.GetByFullName(mock.Anything, "b.a").Return(&catalog.SchemaInfo{ + Name: "a", + CatalogName: "b", + MetastoreId: "d", + Owner: "administrators", + }, nil) + }, + Resource: ResourceSchema(), + Update: true, + ID: "b.a", + InstanceState: map[string]string{ + "metastore_id": "d", + "name": "a", + 
"catalog_name": "b", + "comment": "c", + }, + HCL: ` + name = "a" + catalog_name = "b" + `, + }.ApplyAndExpectData(t, map[string]any{ + "comment": "", + }) +} + +func TestUpdateSchemaChangeForceDestroy(t *testing.T) { + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + w.GetMockMetastoresAPI().EXPECT().Current(mock.Anything).Return(&catalog.MetastoreAssignment{ + MetastoreId: "d", + }, nil) + e := w.GetMockSchemasAPI().EXPECT() + e.GetByFullName(mock.Anything, "b.a").Return(&catalog.SchemaInfo{ + Name: "a", + CatalogName: "b", + MetastoreId: "d", + Owner: "administrators", + }, nil) + }, + Resource: ResourceSchema(), + Update: true, + ID: "b.a", + InstanceState: map[string]string{ + "metastore_id": "d", + "name": "a", + "catalog_name": "b", + "force_destroy": "true", + }, + HCL: ` + name = "a" + catalog_name = "b" + force_destroy = false + `, + }.ApplyNoError(t) +} + func TestUpdateSchemaOwnerWithOtherFields(t *testing.T) { qa.ResourceFixture{ MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { diff --git a/catalog/resource_volume.go b/catalog/resource_volume.go index 3c92a354db..8bb3804aff 100644 --- a/catalog/resource_volume.go +++ b/catalog/resource_volume.go @@ -127,6 +127,10 @@ func ResourceVolume() common.Resource { return nil } + if d.HasChange("comment") && updateVolumeRequestContent.Comment == "" { + updateVolumeRequestContent.ForceSendFields = append(updateVolumeRequestContent.ForceSendFields, "Comment") + } + updateVolumeRequestContent.Owner = "" v, err := w.Volumes.Update(ctx, updateVolumeRequestContent) if err != nil { diff --git a/catalog/resource_volume_test.go b/catalog/resource_volume_test.go index c69ebc1306..ef6d19ee96 100644 --- a/catalog/resource_volume_test.go +++ b/catalog/resource_volume_test.go @@ -359,6 +359,65 @@ func TestVolumesUpdate(t *testing.T) { assert.Equal(t, "/Volumes/testCatalogName/testSchemaName/testNameNew", d.Get("volume_path")) } +func TestVolumesUpdateCommentOnly(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodPatch, + Resource: "/api/2.1/unity-catalog/volumes/testCatalogName.testSchemaName.testName", + ExpectedRequest: catalog.UpdateVolumeRequestContent{ + Comment: "", + ForceSendFields: []string{"Comment"}, + }, + Response: catalog.VolumeInfo{ + Name: "testName", + VolumeType: catalog.VolumeType("testVolumeType"), + CatalogName: "testCatalogName", + SchemaName: "testSchemaName", + Comment: "", + FullName: "testCatalogName.testSchemaName.testName", + }, + }, + { + Method: http.MethodGet, + Resource: "/api/2.1/unity-catalog/volumes/testCatalogName.testSchemaName.testName?", + Response: catalog.VolumeInfo{ + Name: "testName", + VolumeType: catalog.VolumeType("testVolumeType"), + CatalogName: "testCatalogName", + SchemaName: "testSchemaName", + Comment: "", + FullName: "testCatalogName.testSchemaName.testNameNew", + }, + }, + }, + Resource: ResourceVolume(), + Update: true, + InstanceState: map[string]string{ + "name": "testName", + "catalog_name": "testCatalogName", + "schema_name": "testSchemaName", + "volume_type": "testVolumeType", + "comment": "this is a comment", + }, + ID: "testCatalogName.testSchemaName.testName", + HCL: ` + name = "testName" + volume_type = "testVolumeType" + catalog_name = "testCatalogName" + schema_name = "testSchemaName" + comment = "" + `, + }.ApplyAndExpectData(t, map[string]any{ + "name": "testName", + "volume_type": "testVolumeType", + "catalog_name": "testCatalogName", + "schema_name": "testSchemaName", + "comment": "", + 
"volume_path": "/Volumes/testCatalogName/testSchemaName/testNameNew", + }) +} + func TestVolumesUpdateForceNewOnCatalog(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ diff --git a/dashboards/resource_dashboard.go b/dashboards/resource_dashboard.go index de61205243..85d10e3a1a 100644 --- a/dashboards/resource_dashboard.go +++ b/dashboards/resource_dashboard.go @@ -2,9 +2,11 @@ package dashboards import ( "context" + "errors" "log" "strings" + "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/service/dashboards" "github.com/databricks/terraform-provider-databricks/common" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -175,9 +177,32 @@ func ResourceDashboard() common.Resource { if err != nil { return err } - return w.Lakeview.Trash(ctx, dashboards.TrashDashboardRequest{ + + // Attempt to trash the dashboard. + err = w.Lakeview.Trash(ctx, dashboards.TrashDashboardRequest{ DashboardId: d.Id(), }) + + // If the dashboard was already trashed, we'll get a 403 (Permission Denied) error. + // There may be other cases where we get a 403, so we first confirm that the + // dashboard state is actually trashed, and if so, return success. + if errors.Is(err, apierr.ErrPermissionDenied) { + dashboard, nerr := w.Lakeview.Get(ctx, dashboards.GetDashboardRequest{ + DashboardId: d.Id(), + }) + + // Return original error if we can't get the dashboard state. + if nerr != nil { + return err + } + + // If the dashboard is trashed, return success. + if dashboard.LifecycleState == dashboards.LifecycleStateTrashed { + return nil + } + } + + return err }, } } diff --git a/dashboards/resource_dashboard_test.go b/dashboards/resource_dashboard_test.go index 9016ce2dda..0cd48738bb 100644 --- a/dashboards/resource_dashboard_test.go +++ b/dashboards/resource_dashboard_test.go @@ -4,10 +4,12 @@ import ( "fmt" "testing" + "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/experimental/mocks" "github.com/databricks/databricks-sdk-go/service/dashboards" "github.com/databricks/terraform-provider-databricks/qa" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" ) @@ -222,9 +224,12 @@ func TestDashboardUpdate(t *testing.T) { } func TestDashboardDelete(t *testing.T) { - qa.ResourceFixture{ + _, err := qa.ResourceFixture{ MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { - w.GetMockLakeviewAPI().EXPECT().Trash(mock.Anything, dashboards.TrashDashboardRequest{ + e := w.GetMockLakeviewAPI().EXPECT() + + // Expect the dashboard to be trashed. + e.Trash(mock.Anything, dashboards.TrashDashboardRequest{ DashboardId: "xyz", }).Return(nil) }, @@ -237,5 +242,73 @@ func TestDashboardDelete(t *testing.T) { parent_path = "/path" serialized_dashboard = "serialized_json" `, - }.ApplyNoError(t) + }.Apply(t) + + // Expect this to succeed. + assert.NoError(t, err) +} + +func TestDashboardDeletePermissionDenied(t *testing.T) { + _, err := qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + e := w.GetMockLakeviewAPI().EXPECT() + + // First, expect the dashboard to be trashed. + e.Trash(mock.Anything, dashboards.TrashDashboardRequest{ + DashboardId: "xyz", + }).Return(apierr.ErrPermissionDenied) + + // Then, expect to get the dashboard to confirm if it was already trashed. + // We confirm below that the below error is ignored and the original error is returned. 
+ e.Get(mock.Anything, dashboards.GetDashboardRequest{ + DashboardId: "xyz", + }).Return(nil, fmt.Errorf("some other error")) + }, + Resource: ResourceDashboard(), + Delete: true, + ID: "xyz", + HCL: ` + display_name = "Dashboard name" + warehouse_id = "abc" + parent_path = "/path" + serialized_dashboard = "serialized_json" + `, + }.Apply(t) + + // We expect the original error to be returned. + assert.Equal(t, err, apierr.ErrPermissionDenied) +} + +func TestDashboardDeleteAlreadyTrashed(t *testing.T) { + _, err := qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + e := w.GetMockLakeviewAPI().EXPECT() + + // First, expect the dashboard to be trashed. + e.Trash(mock.Anything, dashboards.TrashDashboardRequest{ + DashboardId: "xyz", + }).Return(apierr.ErrPermissionDenied) + + // Then, expect to get the dashboard to confirm if it was already trashed. + e.Get(mock.Anything, dashboards.GetDashboardRequest{ + DashboardId: "xyz", + }).Return(&dashboards.Dashboard{ + DashboardId: "xyz", + ParentPath: "/path", + LifecycleState: dashboards.LifecycleStateTrashed, + }, nil) + }, + Resource: ResourceDashboard(), + Delete: true, + ID: "xyz", + HCL: ` + display_name = "Dashboard name" + warehouse_id = "abc" + parent_path = "/path" + serialized_dashboard = "serialized_json" + `, + }.Apply(t) + + // Expect this to succeed. + assert.NoError(t, err) } diff --git a/docs/data-sources/mws_network_connectivity_config.md b/docs/data-sources/mws_network_connectivity_config.md new file mode 100755 index 0000000000..c360efa5ea --- /dev/null +++ b/docs/data-sources/mws_network_connectivity_config.md @@ -0,0 +1,67 @@ +--- +subcategory: "Deployment" +--- +# databricks_mws_network_connectivity_config Data Source + +-> **Note** This data source can only be used with an account-level provider! + +Retrieves information about [databricks_mws_network_connectivity_config](../resources/mws_network_connectivity_config.md) in Databricks Account. + +## Example Usage + +Fetching information about a network connectivity configuration in Databricks Account + +```hcl +provider "databricks" { + // other configuration + account_id = "" +} + +data "databricks_mws_network_connectivity_config" "this" { + name = "ncc" +} + +output "config" { + value = data.databricks_mws_network_connectivity_config.this +} +``` + +## Argument Reference + +* `name` - (Required) Name of the network connectivity configuration. + +## Attribute Reference + +* `account_id` - The Databricks account ID associated with this network configuration. +* `creation_time` - Time in epoch milliseconds when the network was created. +* `egress_config` - Array of egress configuration objects. + * `default_rules` - Array of default rules. + * `aws_stable_ip_rule` - The stable AWS IP CIDR blocks. You can use these to configure the firewall of your resources to allow traffic from your Databricks workspace. + * `cidr_blocks` - The list of stable IP CIDR blocks from which Databricks network traffic originates when accessing your resources. + * `azure_service_endpoint_rule` - Array of Azure service endpoint rules. + * `subnets` - Array of strings representing the subnet IDs. + * `target_region` - The target region for the service endpoint. + * `target_services` - Array of target services. + * `target_rules` - Array of target rules. + * `azure_private_endpoint_rules` - Array of private endpoint rule objects. + * `rule_id` - The ID of a private endpoint rule. 
+    * `network_connectivity_config_id` - The ID of a network connectivity configuration, which is the parent resource of this private endpoint rule object.
+    * `resource_id` - The Azure resource ID of the target resource.
+    * `group_id` - The sub-resource type (group ID) of the target resource.
+    * `endpoint_name` - The name of the Azure private endpoint resource.
+    * `connection_state` - The current status of this private endpoint.
+    * `deactivated` - Whether this private endpoint is deactivated.
+    * `deactivated_at` - Time in epoch milliseconds when this object was deactivated.
+    * `creation_time` - Time in epoch milliseconds when this object was created.
+    * `updated_time` - Time in epoch milliseconds when this object was updated.
+* `name` - The name of the network connectivity configuration.
+* `network_connectivity_config_id` - The Databricks network connectivity configuration ID.
+* `region` - The region of the network connectivity configuration.
+* `updated_time` - Time in epoch milliseconds when the network was updated.
+
+## Related Resources
+
+The following resources are used in the same context:
+
+* [databricks_mws_network_connectivity_configs](./mws_network_connectivity_configs.md) to get names of all network connectivity configurations.
+* [databricks_mws_network_connectivity_config](../resources/mws_network_connectivity_config.md) to manage network connectivity configuration.
diff --git a/docs/data-sources/mws_network_connectivity_configs.md b/docs/data-sources/mws_network_connectivity_configs.md
new file mode 100755
index 0000000000..9d59dfa329
--- /dev/null
+++ b/docs/data-sources/mws_network_connectivity_configs.md
@@ -0,0 +1,55 @@
+---
+subcategory: "Deployment"
+---
+# databricks_mws_network_connectivity_configs Data Source
+
+-> **Note** This data source can only be used with an account-level provider!
+
+Lists all [databricks_mws_network_connectivity_config](../resources/mws_network_connectivity_config.md) in Databricks Account.
+
+## Example Usage
+
+List all network connectivity configurations in Databricks Account
+
+```hcl
+provider "databricks" {
+  // other configuration
+  account_id = ""
+}
+
+data "databricks_mws_network_connectivity_configs" "this" {}
+
+output "all" {
+  value = data.databricks_mws_network_connectivity_configs.this
+}
+```
+
+List network connectivity configurations from a specific region in Databricks Account
+
+```hcl
+provider "databricks" {
+  // other configuration
+  account_id = ""
+}
+
+data "databricks_mws_network_connectivity_configs" "this" {
+  region = "us-east-1"
+}
+
+output "filtered" {
+  value = data.databricks_mws_network_connectivity_configs.this
+}
+```
+
+## Argument Reference
+
+* `region` - (Optional) Filter network connectivity configurations by region.
+
+## Attribute Reference
+
+This data source exports the following attributes:
+
+* `names` - List of names of [databricks_mws_network_connectivity_config](./mws_network_connectivity_config.md)
+
+## Related Resources
+
+The following resources are used in the same context:
+
+* [databricks_mws_network_connectivity_config](./mws_network_connectivity_config.md) to get information about a single network connectivity configuration.
+* [databricks_mws_network_connectivity_config](../resources/mws_network_connectivity_config.md) to manage network connectivity configuration.
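+
+As a minimal sketch of how the two data sources compose (the `by_name` and `regions_by_name` labels below are illustrative, not part of the provider):
+
+```hcl
+# List all configuration names, then look up the full details of each one.
+data "databricks_mws_network_connectivity_configs" "this" {}
+
+data "databricks_mws_network_connectivity_config" "by_name" {
+  for_each = toset(data.databricks_mws_network_connectivity_configs.this.names)
+  name     = each.value
+}
+
+# Map of configuration name => region, derived from the lookups above.
+output "regions_by_name" {
+  value = { for name, ncc in data.databricks_mws_network_connectivity_config.by_name : name => ncc.region }
+}
+```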
diff --git a/docs/data-sources/registered_model.md b/docs/data-sources/registered_model.md
index 065396d0c2..af60864930 100644
--- a/docs/data-sources/registered_model.md
+++ b/docs/data-sources/registered_model.md
@@ -48,6 +48,6 @@ The following attributes are exported:
 
 The following resources are often used in the same context:
 
-* [databricks_registered_model](../resources/schema.md) resource to manage models within Unity Catalog.
+* [databricks_registered_model](../resources/registered_model.md) resource to manage models within Unity Catalog.
 * [databricks_model_serving](../resources/model_serving.md) to serve this model on a Databricks serving endpoint.
 * [databricks_mlflow_experiment](../resources/mlflow_experiment.md) to manage [MLflow experiments](https://docs.databricks.com/data/data-sources/mlflow-experiment.html) in Databricks.
diff --git a/docs/data-sources/registered_model_versions.md b/docs/data-sources/registered_model_versions.md
new file mode 100644
index 0000000000..e33bb80a7b
--- /dev/null
+++ b/docs/data-sources/registered_model_versions.md
@@ -0,0 +1,64 @@
+---
+subcategory: "Unity Catalog"
+---
+# databricks_registered_model_versions Data Source
+
+-> This data source can only be used with a workspace-level provider!
+
+This data source allows you to get information about versions of a [Model in Unity Catalog](https://docs.databricks.com/en/mlflow/models-in-uc.html).
+
+## Example Usage
+
+```hcl
+data "databricks_registered_model_versions" "this" {
+  full_name = "main.default.my_model"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `full_name` - (Required) The fully-qualified name of the registered model (`catalog_name.schema_name.name`).
+
+## Attribute Reference
+
+The following attributes are exported:
+
+* `model_versions` - list of objects describing the model versions. Each object consists of the following attributes:
+  * `aliases` - the list of aliases associated with this model. Each item is an object consisting of the following attributes:
+    * `alias_name` - string with the name of the alias
+    * `version_num` - associated model version
+  * `catalog_name` - The name of the catalog where the schema and the registered model reside.
+  * `comment` - The comment attached to the registered model.
+  * `created_at` - the Unix timestamp at the model's creation
+  * `created_by` - the identifier of the user who created the model
+  * `full_name` - The fully-qualified name of the registered model (`catalog_name.schema_name.name`).
+  * `id` - The unique identifier of the model version
+  * `metastore_id` - the unique identifier of the metastore
+  * `model_version_dependencies` - block describing model version dependencies, for feature-store packaged models. Consists of the following attributes:
+    * `dependencies` - list of dependencies consisting of the following attributes:
+      * `function` - A function that is dependent on a SQL object:
+        * `function_full_name` - Full name of the dependent function
+      * `table` - A table that is dependent on a SQL object
+        * `table_full_name` - Full name of the dependent table
+  * `name` - The name of the registered model.
+  * `owner` - Name of the registered model owner.
+  * `run_id` - MLflow run ID used when creating the model version, if `source` was generated by an experiment run stored in an MLflow tracking server
+  * `run_workspace_id` - ID of the Databricks workspace containing the MLflow run that generated this model version, if applicable
+  * `schema_name` - The name of the schema where the registered model resides.
+  * `source` - URI indicating the location of the source artifacts (files) for the model version.
+  * `status` - Current status of the model version.
+  * `storage_location` - The storage location under which model version data files are stored.
+  * `updated_at` - the timestamp of the last time changes were made to the model
+  * `updated_by` - the identifier of the user who updated the model last time
+  * `version` - Integer model version number, used to reference the model version in API requests.
+
+## Related Resources
+
+The following resources are often used in the same context:
+
+* [databricks_registered_model](registered_model.md) data source to retrieve information about a model within Unity Catalog.
+* [databricks_registered_model](../resources/registered_model.md) resource to manage models within Unity Catalog.
+* [databricks_model_serving](../resources/model_serving.md) to serve this model on a Databricks serving endpoint.
+* [databricks_mlflow_experiment](../resources/mlflow_experiment.md) to manage [MLflow experiments](https://docs.databricks.com/data/data-sources/mlflow-experiment.html) in Databricks.
diff --git a/docs/data-sources/serving_endpoints.md b/docs/data-sources/serving_endpoints.md
new file mode 100644
index 0000000000..c1d314880a
--- /dev/null
+++ b/docs/data-sources/serving_endpoints.md
@@ -0,0 +1,55 @@
+---
+subcategory: "Serving"
+---
+# databricks_serving_endpoints Data Source
+
+-> This data source can only be used with a workspace-level provider!
+
+This data source allows you to get information about [Model Serving](https://docs.databricks.com/machine-learning/model-serving/index.html) endpoints in Databricks.
+
+## Example Usage
+
+```hcl
+data "databricks_serving_endpoints" "all" {
+}
+
+resource "databricks_permissions" "ml_serving_usage" {
+  for_each            = { for endpoint in data.databricks_serving_endpoints.all.endpoints : endpoint.name => endpoint }
+  serving_endpoint_id = each.value.id
+
+  access_control {
+    group_name       = "users"
+    permission_level = "CAN_VIEW"
+  }
+
+  access_control {
+    group_name       = databricks_group.auto.display_name
+    permission_level = "CAN_MANAGE"
+  }
+
+  access_control {
+    group_name       = databricks_group.eng.display_name
+    permission_level = "CAN_QUERY"
+  }
+}
+```
+
+## Attribute Reference
+
+The following attributes are exported:
+
+* `endpoints` - List of objects describing the serving endpoints. Each object consists of the following attributes:
+  * `name` - The name of the model serving endpoint.
+  * `config` - The model serving endpoint configuration.
+  * `tags` - Tags to be attached to the serving endpoint and automatically propagated to billing logs.
+  * `rate_limits` - A list of rate limit blocks to be applied to the serving endpoint.
+  * `ai_gateway` - A block with AI Gateway configuration for the serving endpoint.
+  * `route_optimized` - A boolean enabling route optimization for the endpoint.
+
+See the [`databricks_model_serving` resource](../resources/model_serving.md) for the full list of attributes for each block.
+
+## Related Resources
+
+The following resources are often used in the same context:
+
+* [databricks_permissions](../resources/permissions.md#model-serving-usage) can control which groups or individual users can *Manage*, *Query* or *View* individual serving endpoints.
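+
+As a small usage sketch building on the example above (the `endpoint_names` output is illustrative):
+
+```hcl
+data "databricks_serving_endpoints" "all" {}
+
+# Collect just the endpoint names from the returned list of objects.
+output "endpoint_names" {
+  value = [for endpoint in data.databricks_serving_endpoints.all.endpoints : endpoint.name]
+}
+```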
diff --git a/docs/resources/mws_ncc_private_endpoint_rule.md b/docs/resources/mws_ncc_private_endpoint_rule.md
index 50fba93908..c9ce270cff 100644
--- a/docs/resources/mws_ncc_private_endpoint_rule.md
+++ b/docs/resources/mws_ncc_private_endpoint_rule.md
@@ -35,7 +35,7 @@ The following arguments are available:
 
 * `network_connectivity_config_id` - Canonical unique identifier of Network Connectivity Config in Databricks Account. Change forces creation of a new resource.
 * `resource_id` - The Azure resource ID of the target resource. Change forces creation of a new resource.
-* `group_id` - The sub-resource type (group ID) of the target resource. Must be one of `blob`, `dfs`, `sqlServer` or `mysqlServer`. Note that to connect to workspace root storage (root DBFS), you need two endpoints, one for blob and one for dfs. Change forces creation of a new resource.
+* `group_id` - The sub-resource type (group ID) of the target resource. Must be one of the supported resource types (e.g., `blob`, `dfs`, `sqlServer`; consult the [Azure documentation](https://learn.microsoft.com/en-us/azure/private-link/private-endpoint-overview#private-link-resource) for the full list of supported resources). Note that to connect to workspace root storage (root DBFS), you need two endpoints, one for `blob` and one for `dfs`. Change forces creation of a new resource.
 
 ## Attribute Reference
diff --git a/docs/resources/mws_workspaces.md b/docs/resources/mws_workspaces.md
index 4f48777274..4d5e45d3bd 100644
--- a/docs/resources/mws_workspaces.md
+++ b/docs/resources/mws_workspaces.md
@@ -26,7 +26,7 @@ To get workspace running, you have to configure a couple of things:
 
 * [databricks_mws_credentials](mws_credentials.md) - You can share a credentials (cross-account IAM role) configuration ID with multiple workspaces. It is not required to create a new one for each workspace.
 * [databricks_mws_storage_configurations](mws_storage_configurations.md) - You can share a root S3 bucket with multiple workspaces in a single account. You do not have to create new ones for each workspace. If you share a root S3 bucket for multiple workspaces in an account, data on the root S3 bucket is partitioned into separate directories by workspace.
-* [databricks_mws_networks](mws_networks.md) - (optional, but recommended) You can share one [customer-managed VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) with multiple workspaces in a single account. You do not have to create a new VPC for each workspace. However, you cannot reuse subnets or security groups with other resources, including other workspaces or non-Databricks resources. If you plan to share one VPC with multiple workspaces, be sure to size your VPC and subnets accordingly. Because a Databricks [databricks_mws_networks](mws_networks.md) encapsulates this information, you cannot reuse it across workspaces.
+* [databricks_mws_networks](mws_networks.md) - (optional, but recommended) You can share one [customer-managed VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) with multiple workspaces in a single account. However, Databricks recommends using unique subnets and security groups for each workspace. If you plan to share one VPC with multiple workspaces, be sure to size your VPC and subnets accordingly. Because a Databricks [databricks_mws_networks](mws_networks.md) encapsulates this information, you cannot reuse it across workspaces.
* [databricks_mws_customer_managed_keys](mws_customer_managed_keys.md) - You can share a customer-managed key across workspaces. ```hcl diff --git a/docs/resources/query.md b/docs/resources/query.md index cc8bc90edd..351fe34716 100644 --- a/docs/resources/query.md +++ b/docs/resources/query.md @@ -34,7 +34,7 @@ The following arguments are available: * `catalog` - (Optional, String) Name of the catalog where this query will be executed. * `schema` - (Optional, String) Name of the schema where this query will be executed. * `description` - (Optional, String) General description that conveys additional information about this query such as usage notes. -* `run_as_mode` - (Optional, String) Sets the "Run as" role for the object. +* `run_as_mode` - (Optional, String) Sets the "Run as" role for the object. Should be one of `OWNER`, `VIEWER`. * `tags` - (Optional, List of strings) Tags that will be added to the query. * `parameter` - (Optional, Block) Query parameter definition. Consists of following attributes (one of `*_value` is required): * `name` - (Required, String) Literal parameter marker that appears between double curly braces in the query text. diff --git a/go.mod b/go.mod index 46f813bfe0..90353293a3 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/golang-jwt/jwt/v4 v4.5.1 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/hcl v1.0.0 - github.com/hashicorp/hcl/v2 v2.22.0 + github.com/hashicorp/hcl/v2 v2.23.0 github.com/hashicorp/terraform-plugin-framework v1.13.0 github.com/hashicorp/terraform-plugin-framework-validators v0.15.0 github.com/hashicorp/terraform-plugin-go v0.25.0 diff --git a/go.sum b/go.sum index c2a71615c8..a2faeac660 100644 --- a/go.sum +++ b/go.sum @@ -124,8 +124,8 @@ github.com/hashicorp/hc-install v0.9.0 h1:2dIk8LcvANwtv3QZLckxcjyF5w8KVtiMxu6G6e github.com/hashicorp/hc-install v0.9.0/go.mod h1:+6vOP+mf3tuGgMApVYtmsnDoKWMDcFXeTxCACYZ8SFg= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/hcl/v2 v2.22.0 h1:hkZ3nCtqeJsDhPRFz5EA9iwcG1hNWGePOTw6oyul12M= -github.com/hashicorp/hcl/v2 v2.22.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= +github.com/hashicorp/hcl/v2 v2.23.0 h1:Fphj1/gCylPxHutVSEOf2fBOh1VE4AuLV7+kbJf3qos= +github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVWkd/RG0D2XQ= diff --git a/internal/acceptance/data_mws_network_connectivity_config_test.go b/internal/acceptance/data_mws_network_connectivity_config_test.go new file mode 100755 index 0000000000..15dc457d62 --- /dev/null +++ b/internal/acceptance/data_mws_network_connectivity_config_test.go @@ -0,0 +1,51 @@ +package acceptance + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/terraform" +) + +func TestAccDataSourceMwsNetworkConnectivityConfigTest(t *testing.T) { + loadWorkspaceEnv(t) + if isGcp(t) { + skipf(t)("GCP not supported") + } + var sourceRegion string + if isAzure(t) { + sourceRegion = "eastus2" + } else if isAws(t) { + sourceRegion = "us-east-2" + } + AccountLevel(t, + Step{ + Template: fmt.Sprintf(` + resource "databricks_mws_network_connectivity_config" "this" { + name = 
"tf-{var.RANDOM}" + region = "%s" + } + + data "databricks_mws_network_connectivity_config" "this" { + depends_on = [databricks_mws_network_connectivity_config.this] + name = databricks_mws_network_connectivity_config.this.name + }`, sourceRegion), + Check: func(s *terraform.State) error { + r, ok := s.RootModule().Resources["data.databricks_mws_network_connectivity_config.this"] + if !ok { + return fmt.Errorf("data not found in state") + } + name := r.Primary.Attributes["name"] + if name == "" { + return fmt.Errorf("name is empty: %v", r.Primary.Attributes) + } + expect := sourceRegion + region := r.Primary.Attributes["region"] + if region != expect { + return fmt.Errorf("incorrect region. expected: %v, received: %v", + expect, region) + } + return nil + }, + }) +} diff --git a/internal/acceptance/data_mws_network_connectivity_configs_test.go b/internal/acceptance/data_mws_network_connectivity_configs_test.go new file mode 100755 index 0000000000..94647bb4ca --- /dev/null +++ b/internal/acceptance/data_mws_network_connectivity_configs_test.go @@ -0,0 +1,45 @@ +package acceptance + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/terraform" +) + +func TestAccDataSourceMwsNetworkConnectivityConfigsTest(t *testing.T) { + loadWorkspaceEnv(t) + if isGcp(t) { + skipf(t)("GCP not supported") + } + var region string + if isAzure(t) { + region = "eastus2" + } else if isAws(t) { + region = "us-east-2" + } + AccountLevel(t, + Step{ + Template: fmt.Sprintf(` + resource "databricks_mws_network_connectivity_configs" "this" { + name = "tf-{var.RANDOM}" + region = "%s" + } + + data "databricks_mws_network_connectivity_configs" "this" { + depends_on = [databricks_mws_network_connectivity_config.this] + region = databricks_mws_network_connectivity_config.this.region + }`, region), + Check: func(s *terraform.State) error { + r, ok := s.RootModule().Resources["data.databricks_mws_network_connectivity_configs.this"] + if !ok { + return fmt.Errorf("data not found in state") + } + names := r.Primary.Attributes["names"] + if names == "" { + return fmt.Errorf("names is empty: %v", r.Primary.Attributes) + } + return nil + }, + }) +} diff --git a/internal/acceptance/model_serving_test.go b/internal/acceptance/model_serving_test.go index fdc00a920f..808ef634ab 100644 --- a/internal/acceptance/model_serving_test.go +++ b/internal/acceptance/model_serving_test.go @@ -47,6 +47,8 @@ func TestAccModelServing(t *testing.T) { } } + data "databricks_serving_endpoints" "all" {} + resource "databricks_permissions" "ml_serving_usage" { serving_endpoint_id = databricks_model_serving.endpoint.serving_endpoint_id @@ -77,6 +79,7 @@ func TestAccModelServing(t *testing.T) { } } } + data "databricks_serving_endpoints" "all" {} `, name), }, ) diff --git a/internal/acceptance/quality_monitor_test.go b/internal/acceptance/quality_monitor_test.go index d9e8a62c51..b6a048bf52 100644 --- a/internal/acceptance/quality_monitor_test.go +++ b/internal/acceptance/quality_monitor_test.go @@ -1,6 +1,7 @@ package acceptance import ( + "fmt" "os" "testing" ) @@ -64,6 +65,10 @@ func TestUcAccQualityMonitor(t *testing.T) { model_id_col = "model_id" problem_type = "PROBLEM_TYPE_REGRESSION" } + schedule { + quartz_cron_expression = "0 0 12 * * ?" + timezone_id = "PST" + } } resource "databricks_sql_table" "myTimeseries" { @@ -87,6 +92,10 @@ func TestUcAccQualityMonitor(t *testing.T) { granularities = ["1 day"] timestamp_col = "timestamp" } + schedule { + quartz_cron_expression = "0 0 12 * * ?" 
+ timezone_id = "PST" + } } resource "databricks_sql_table" "mySnapshot" { @@ -117,8 +126,7 @@ func TestUcAccUpdateQualityMonitor(t *testing.T) { if os.Getenv("GOOGLE_CREDENTIALS") != "" { t.Skipf("databricks_quality_monitor resource is not available on GCP") } - UnityWorkspaceLevel(t, Step{ - Template: commonPartQualityMonitoring + ` + qmTemplate := ` resource "databricks_quality_monitor" "testMonitorInference" { table_name = databricks_sql_table.myInferenceTable.id assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" @@ -130,22 +138,15 @@ func TestUcAccUpdateQualityMonitor(t *testing.T) { model_id_col = "model_id" problem_type = "PROBLEM_TYPE_REGRESSION" } + schedule { + quartz_cron_expression = "0 0 %s * * ?" + timezone_id = "PST" + } } - `, + ` + UnityWorkspaceLevel(t, Step{ + Template: commonPartQualityMonitoring + fmt.Sprintf(qmTemplate, "12"), }, Step{ - Template: commonPartQualityMonitoring + ` - resource "databricks_quality_monitor" "testMonitorInference" { - table_name = databricks_sql_table.myInferenceTable.id - assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" - output_schema_name = databricks_schema.things.id - inference_log { - granularities = ["1 hour"] - timestamp_col = "timestamp" - prediction_col = "prediction" - model_id_col = "model_id" - problem_type = "PROBLEM_TYPE_REGRESSION" - } - } - `, + Template: commonPartQualityMonitoring + fmt.Sprintf(qmTemplate, "11"), }) } diff --git a/internal/acceptance/registered_model_test.go b/internal/acceptance/registered_model_test.go index adbaf9b11e..b81b68ce12 100644 --- a/internal/acceptance/registered_model_test.go +++ b/internal/acceptance/registered_model_test.go @@ -46,9 +46,15 @@ func TestUcAccRegisteredModel(t *testing.T) { data "databricks_registered_model" "model" { full_name = databricks_registered_model.model.id } + data "databricks_registered_model_versions" "model_versions" { + full_name = databricks_registered_model.model.id + } output "model" { value = data.databricks_registered_model.model } + output "model_versions" { + value = data.databricks_registered_model_versions.model_versions + } `, }, ) diff --git a/internal/providers/pluginfw/pluginfw_rollout_utils.go b/internal/providers/pluginfw/pluginfw_rollout_utils.go index 87e75d01d8..2a83954e39 100644 --- a/internal/providers/pluginfw/pluginfw_rollout_utils.go +++ b/internal/providers/pluginfw/pluginfw_rollout_utils.go @@ -18,6 +18,7 @@ import ( "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/products/notificationdestinations" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/products/qualitymonitor" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/products/registered_model" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/products/serving" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/products/sharing" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/products/volume" "github.com/hashicorp/terraform-plugin-framework/datasource" @@ -43,7 +44,9 @@ var pluginFwOnlyResources = []func() resource.Resource{ // List of data sources that have been onboarded to the plugin framework - not migrated from sdkv2. 
var pluginFwOnlyDataSources = []func() datasource.DataSource{ + serving.DataSourceServingEndpoints, registered_model.DataSourceRegisteredModel, + registered_model.DataSourceRegisteredModelVersions, notificationdestinations.DataSourceNotificationDestinations, catalog.DataSourceFunctions, // TODO: Add DataSourceCluster into migratedDataSources after fixing unit tests. diff --git a/internal/providers/pluginfw/products/qualitymonitor/resource_quality_monitor.go b/internal/providers/pluginfw/products/qualitymonitor/resource_quality_monitor.go index 7a0445ddbb..8f6551922c 100644 --- a/internal/providers/pluginfw/products/qualitymonitor/resource_quality_monitor.go +++ b/internal/providers/pluginfw/products/qualitymonitor/resource_quality_monitor.go @@ -195,6 +195,9 @@ func (r *QualityMonitorResource) Update(ctx context.Context, req resource.Update if resp.Diagnostics.HasError() { return } + if updateMonitorGoSDK.Schedule != nil { + updateMonitorGoSDK.Schedule.PauseStatus = "" + } monitor, err := w.QualityMonitors.Update(ctx, updateMonitorGoSDK) if err != nil { resp.Diagnostics.AddError("failed to update monitor", err.Error()) diff --git a/internal/providers/pluginfw/products/registered_model/data_registered_model_versions.go b/internal/providers/pluginfw/products/registered_model/data_registered_model_versions.go new file mode 100644 index 0000000000..916221032a --- /dev/null +++ b/internal/providers/pluginfw/products/registered_model/data_registered_model_versions.go @@ -0,0 +1,78 @@ +package registered_model + +import ( + "context" + "fmt" + + "github.com/databricks/terraform-provider-databricks/common" + pluginfwcommon "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/common" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/converters" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/tfschema" + "github.com/databricks/terraform-provider-databricks/internal/service/catalog_tf" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func DataSourceRegisteredModelVersions() datasource.DataSource { + return &RegisteredModelVersionsDataSource{} +} + +var _ datasource.DataSourceWithConfigure = &RegisteredModelVersionsDataSource{} + +type RegisteredModelVersionsDataSource struct { + Client *common.DatabricksClient +} + +type RegisteredModelVersionsData struct { + FullName types.String `tfsdk:"full_name"` + ModelVersions []catalog_tf.ModelVersionInfo `tfsdk:"model_versions" tf:"optional,computed"` +} + +func (d *RegisteredModelVersionsDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = "databricks_registered_model_versions" +} + +func (d *RegisteredModelVersionsDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + attrs, blocks := tfschema.DataSourceStructToSchemaMap(RegisteredModelVersionsData{}, nil) + resp.Schema = schema.Schema{ + Attributes: attrs, + Blocks: blocks, + } +} + +func (d *RegisteredModelVersionsDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if d.Client == nil { + d.Client = pluginfwcommon.ConfigureDataSource(req, resp) + } +} + +func (d *RegisteredModelVersionsDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) 
{ + w, diags := d.Client.GetWorkspaceClient() + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + var registeredModelVersions RegisteredModelVersionsData + diags = req.Config.Get(ctx, ®isteredModelVersions) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + modelFullName := registeredModelVersions.FullName.ValueString() + modelVersions, err := w.ModelVersions.ListByFullName(ctx, modelFullName) + if err != nil { + resp.Diagnostics.AddError(fmt.Sprintf("failed to list model versions for registered model %s", modelFullName), err.Error()) + return + } + for _, modelVersionSdk := range modelVersions.ModelVersions { + var modelVersion catalog_tf.ModelVersionInfo + resp.Diagnostics.Append(converters.GoSdkToTfSdkStruct(ctx, modelVersionSdk, &modelVersion)...) + if resp.Diagnostics.HasError() { + return + } + registeredModelVersions.ModelVersions = append(registeredModelVersions.ModelVersions, modelVersion) + } + resp.Diagnostics.Append(resp.State.Set(ctx, registeredModelVersions)...) +} diff --git a/internal/providers/pluginfw/products/serving/data_serving_endpoints.go b/internal/providers/pluginfw/products/serving/data_serving_endpoints.go new file mode 100644 index 0000000000..48068ad072 --- /dev/null +++ b/internal/providers/pluginfw/products/serving/data_serving_endpoints.go @@ -0,0 +1,78 @@ +package serving + +import ( + "context" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/terraform-provider-databricks/common" + pluginfwcommon "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/common" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/converters" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/tfschema" + "github.com/databricks/terraform-provider-databricks/internal/service/serving_tf" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func DataSourceServingEndpoints() datasource.DataSource { + return &ServingEndpointsDataSource{} +} + +var _ datasource.DataSourceWithConfigure = &ServingEndpointsDataSource{} + +type ServingEndpointsDataSource struct { + Client *common.DatabricksClient +} + +type ServingEndpointsData struct { + Endpoints []serving_tf.ServingEndpoint `tfsdk:"endpoints" tf:"optional,computed"` +} + +func (d *ServingEndpointsDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = "databricks_serving_endpoints" +} + +func (d *ServingEndpointsDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + attrs, blocks := tfschema.DataSourceStructToSchemaMap(ServingEndpointsData{}, nil) + resp.Schema = schema.Schema{ + Attributes: attrs, + Blocks: blocks, + } +} + +func (d *ServingEndpointsDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if d.Client == nil { + d.Client = pluginfwcommon.ConfigureDataSource(req, resp) + } +} + +func (d *ServingEndpointsDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + w, diags := d.Client.GetWorkspaceClient() + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + var endpoints ServingEndpointsData + diags = req.Config.Get(ctx, &endpoints) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + endpointsInfoSdk, err := w.ServingEndpoints.ListAll(ctx) + if err != nil { + if apierr.IsMissing(err) { + resp.State.RemoveResource(ctx) + } + resp.Diagnostics.AddError("failed to list endpoints", err.Error()) + return + } + for _, endpoint := range endpointsInfoSdk { + var endpointsInfo serving_tf.ServingEndpoint + resp.Diagnostics.Append(converters.GoSdkToTfSdkStruct(ctx, endpoint, &endpointsInfo)...) + if resp.Diagnostics.HasError() { + return + } + endpoints.Endpoints = append(endpoints.Endpoints, endpointsInfo) + } + resp.Diagnostics.Append(resp.State.Set(ctx, endpoints)...) +} diff --git a/internal/providers/sdkv2/sdkv2.go b/internal/providers/sdkv2/sdkv2.go index 2ca791a2e8..b91078738a 100644 --- a/internal/providers/sdkv2/sdkv2.go +++ b/internal/providers/sdkv2/sdkv2.go @@ -103,6 +103,8 @@ func DatabricksProvider(sdkV2Fallbacks ...pluginfw.SdkV2FallbackOption) *schema. "databricks_mlflow_model": mlflow.DataSourceModel().ToResource(), "databricks_mlflow_models": mlflow.DataSourceModels().ToResource(), "databricks_mws_credentials": mws.DataSourceMwsCredentials().ToResource(), + "databricks_mws_network_connectivity_config": mws.DataSourceMwsNetworkConnectivityConfig().ToResource(), + "databricks_mws_network_connectivity_configs": mws.DataSourceMwsNetworkConnectivityConfigs().ToResource(), "databricks_mws_workspaces": mws.DataSourceMwsWorkspaces().ToResource(), "databricks_node_type": clusters.DataSourceNodeType().ToResource(), "databricks_notebook": workspace.DataSourceNotebook().ToResource(), diff --git a/mws/data_mws_network_connectivity_config.go b/mws/data_mws_network_connectivity_config.go new file mode 100644 index 0000000000..ba50160886 --- /dev/null +++ b/mws/data_mws_network_connectivity_config.go @@ -0,0 +1,33 @@ +package mws + +import ( + "context" + + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/databricks/terraform-provider-databricks/common" +) + +func DataSourceMwsNetworkConnectivityConfig() common.Resource { + type mwsNetworkConnectivityConfiguration struct { + settings.NetworkConnectivityConfiguration + } + + type mwsNetworkConnectivityConfigurationParams struct { + Name string `json:"name"` + } + + return common.AccountDataWithParams(func(ctx context.Context, data mwsNetworkConnectivityConfigurationParams, a *databricks.AccountClient) (*mwsNetworkConnectivityConfiguration, error) { + list, err := a.NetworkConnectivity.ListNetworkConnectivityConfigurationsAll(ctx, settings.ListNetworkConnectivityConfigurationsRequest{}) + if err != nil { + return nil, err + } + + for _, ncc := range list { + if data.Name == ncc.Name { + return &mwsNetworkConnectivityConfiguration{NetworkConnectivityConfiguration: ncc}, nil + } + } + return nil, nil + }) +} diff --git a/mws/data_mws_network_connectivity_config_test.go b/mws/data_mws_network_connectivity_config_test.go new file mode 100644 index 0000000000..e3eaa0ee28 --- /dev/null +++ b/mws/data_mws_network_connectivity_config_test.go @@ -0,0 +1,67 @@ +package mws + +import ( + "fmt" + "testing" + + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/stretchr/testify/mock" + + "github.com/databricks/terraform-provider-databricks/qa" +) + +func TestDataSourceMwsNetworkConnectivityConfig(t *testing.T) { + var ncc = settings.NetworkConnectivityConfiguration{ + AccountId: "abc", + CreationTime: 0, + EgressConfig: 
&settings.NccEgressConfig{ + DefaultRules: nil, + TargetRules: nil, + }, + Name: "def", + NetworkConnectivityConfigId: "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", + Region: "us-east-1", + UpdatedTime: 0, + } + + qa.ResourceFixture{ + MockAccountClientFunc: func(a *mocks.MockAccountClient) { + api := a.GetMockNetworkConnectivityAPI().EXPECT() + api.ListNetworkConnectivityConfigurationsAll(mock.Anything, settings.ListNetworkConnectivityConfigurationsRequest{}).Return( + []settings.NetworkConnectivityConfiguration{ncc}, nil, + ) + }, + AccountID: "abc", + Read: true, + NonWritable: true, + Resource: DataSourceMwsNetworkConnectivityConfig(), + ID: "_", + HCL: fmt.Sprintf(` + name = "%s" + `, ncc.Name), + }.ApplyAndExpectData(t, map[string]interface{}{ + "account_id": "abc", + "creation_time": 0, + "egress_config": []interface{}{map[string]interface{}{"default_rules": []interface{}{}, "target_rules": []interface{}{}}}, + "id": "_", + "name": "def", + "network_connectivity_config_id": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", + "region": "us-east-1", + "updated_time": 0, + }) +} + +func TestDataSourceMwsNetworkConnectivityConfig_Error(t *testing.T) { + qa.ResourceFixture{ + Fixtures: qa.HTTPFailures, + AccountID: "abc", + Resource: DataSourceMwsNetworkConnectivityConfig(), + Read: true, + NonWritable: true, + ID: "_", + HCL: ` + name = "def" + `, + }.ExpectError(t, "i'm a teapot") +} diff --git a/mws/data_mws_network_connectivity_configs.go b/mws/data_mws_network_connectivity_configs.go new file mode 100644 index 0000000000..870973fe53 --- /dev/null +++ b/mws/data_mws_network_connectivity_configs.go @@ -0,0 +1,43 @@ +package mws + +import ( + "context" + + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/databricks/terraform-provider-databricks/common" +) + +func DataSourceMwsNetworkConnectivityConfigs() common.Resource { + type mwsNetworkConnectivityConfiguration struct { + Names []string `json:"names" tf:"computed,optional"` + } + + type mwsNetworkConnectivityConfigurationParams struct { + Names []string `json:"names" tf:"computed,optional"` + Region string `json:"region" tf:"optional"` + } + + return common.AccountDataWithParams(func(ctx context.Context, data mwsNetworkConnectivityConfigurationParams, a *databricks.AccountClient) (*mwsNetworkConnectivityConfiguration, error) { + list, err := a.NetworkConnectivity.ListNetworkConnectivityConfigurationsAll(ctx, settings.ListNetworkConnectivityConfigurationsRequest{}) + if err != nil { + return nil, err + } + + if data.Region != "" { + filtered := []string{} + for _, ncc := range list { + if data.Region == ncc.Region { + filtered = append(filtered, ncc.Name) + } + } + return &mwsNetworkConnectivityConfiguration{Names: filtered}, nil + } + + names := []string{} + for _, ncc := range list { + names = append(names, ncc.Name) + } + return &mwsNetworkConnectivityConfiguration{Names: names}, nil + }) +} diff --git a/mws/data_mws_network_connectivity_configs_test.go b/mws/data_mws_network_connectivity_configs_test.go new file mode 100644 index 0000000000..71fab07131 --- /dev/null +++ b/mws/data_mws_network_connectivity_configs_test.go @@ -0,0 +1,144 @@ +package mws + +import ( + "fmt" + "testing" + + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/stretchr/testify/mock" + + "github.com/databricks/terraform-provider-databricks/qa" +) + +func getTestNccs() []settings.NetworkConnectivityConfiguration { + 
	return []settings.NetworkConnectivityConfiguration{
+		{
+			AccountId: "abc",
+			CreationTime: 0,
+			EgressConfig: &settings.NccEgressConfig{
+				DefaultRules: nil,
+				TargetRules: nil,
+			},
+			Name: "def",
+			NetworkConnectivityConfigId: "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
+			Region: "us-east-1",
+			UpdatedTime: 0,
+		},
+		{
+			AccountId: "abc",
+			CreationTime: 0,
+			EgressConfig: &settings.NccEgressConfig{
+				DefaultRules: nil,
+				TargetRules: nil,
+			},
+			Name: "ghi",
+			NetworkConnectivityConfigId: "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
+			Region: "us-west-1",
+			UpdatedTime: 0,
+		},
+	}
+}
+
+func TestDataSourceMwsNetworkConnectivityConfigs_All(t *testing.T) {
+	qa.ResourceFixture{
+		MockAccountClientFunc: func(a *mocks.MockAccountClient) {
+			api := a.GetMockNetworkConnectivityAPI().EXPECT()
+			// The fixtures returned here are exactly the shared test data above.
+			api.ListNetworkConnectivityConfigurationsAll(mock.Anything, settings.ListNetworkConnectivityConfigurationsRequest{}).Return(
+				getTestNccs(), nil,
+			)
+		},
+		AccountID: "abc",
+		Read: true,
+		NonWritable: true,
+		Resource: DataSourceMwsNetworkConnectivityConfigs(),
+		ID: "_",
+	}.ApplyAndExpectData(t, map[string]any{
+		"names": []interface{}{"def", "ghi"},
+	})
+}
+
+func TestDataSourceMwsNetworkConnectivityConfigs_Filter(t *testing.T) {
+	qa.ResourceFixture{
+		MockAccountClientFunc: func(a *mocks.MockAccountClient) {
+			api := a.GetMockNetworkConnectivityAPI().EXPECT()
+			api.ListNetworkConnectivityConfigurationsAll(mock.Anything, settings.ListNetworkConnectivityConfigurationsRequest{}).Return(
+				[]settings.NetworkConnectivityConfiguration{
+					{
+						AccountId: "abc",
+						CreationTime: 0,
+						EgressConfig: &settings.NccEgressConfig{
+							DefaultRules: nil,
+							TargetRules: nil,
+						},
+						Name: "def",
+						NetworkConnectivityConfigId: "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
+						Region: "us-east-1",
+						UpdatedTime: 0,
+					},
+					{
+						AccountId: "abc",
+						CreationTime: 0,
+						EgressConfig: &settings.NccEgressConfig{
+							DefaultRules: nil,
+							TargetRules: nil,
+						},
+						Name: "def-3",
+						NetworkConnectivityConfigId: "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
+						Region: "us-west-1",
+						UpdatedTime: 0,
+					},
+				}, nil,
+			)
+		},
+		AccountID: "abc",
+		Read: true,
+		NonWritable: true,
+		Resource: DataSourceMwsNetworkConnectivityConfigs(),
+		ID: "_",
+		HCL: fmt.Sprintf(`
+		region = "%s"
+		`, getTestNccs()[0].Region),
+	}.ApplyAndExpectData(t, map[string]any{
+		"names": []interface{}{"def"},
+	})
+}
+
+func TestDataSourceMwsNetworkConnectivityConfigs_Error(t *testing.T) {
+	qa.ResourceFixture{
+		Fixtures: qa.HTTPFailures,
+		AccountID: "abc",
+		Resource: DataSourceMwsNetworkConnectivityConfigs(),
+		Read: true,
+		NonWritable: true,
+		ID: "_",
+		HCL: `
+		region = "us-east-1"
+		`,
+	}.ExpectError(t, "i'm a teapot")
+}
diff --git a/qa/testing.go b/qa/testing.go
index 0390ef33bc..366c8c6786 100644
--- a/qa/testing.go
+++ b/qa/testing.go
@@ -11,6 +11,7 @@ import (
 	"net/http"
 	"net/http/httptest"
 	"os"
+	"reflect"
 	"regexp"
 	"sort"
 	"strings"
@@ -390,9 +391,10 @@ func (f ResourceFixture) ApplyAndExpectData(t *testing.T, data 
map[string]any) { if k == "id" { assert.Equal(t, expected, d.Id()) } else if that, ok := d.Get(k).(*schema.Set); ok { - this := expected.([]any) - assert.Equal(t, len(this), that.Len(), "set has different length") - for _, item := range this { + this := reflect.ValueOf(expected) + assert.Equal(t, this.Len(), that.Len(), "set has different length") + for i := 0; i < this.Len(); i++ { + item := this.Index(i).Interface() assert.True(t, that.Contains(item), "set does not contain %s", item) } } else { diff --git a/qa/testing_test.go b/qa/testing_test.go index b68d5899e1..68acbe3159 100644 --- a/qa/testing_test.go +++ b/qa/testing_test.go @@ -93,6 +93,22 @@ var noopContextResource = common.Resource{ Type: schema.TypeBool, Required: true, }, + "nested": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "tags": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, }, Read: noopContext, Create: noopContext, @@ -218,6 +234,37 @@ func TestResourceFixture_ApplyAndExpectData(t *testing.T) { }.ApplyAndExpectData(t, map[string]any{"id": "x", "dummy": true, "trigger": "now"}) } +func TestResourceFixture_ApplyAndExpectDataSet(t *testing.T) { + ResourceFixture{ + CommandMock: func(commandStr string) common.CommandResults { + return common.CommandResults{ + ResultType: "text", + Data: "yes", + } + }, + Azure: true, + Resource: noopContextResource, + ID: "x", + Delete: true, + HCL: ` + dummy = true + trigger = "now" + nested { + tags { + env = "prod" + } + } + `, + }.ApplyAndExpectData(t, + map[string]any{ + "id": "x", + "dummy": true, + "trigger": "now", + "nested": []any{map[string]any{"tags": map[string]any{"env": "prod"}}}, + }, + ) +} + func TestResourceFixture_InstanceState(t *testing.T) { ResourceFixture{ Resource: noopContextResource, diff --git a/sql/resource_query.go b/sql/resource_query.go index 120353c171..bccd7a0533 100644 --- a/sql/resource_query.go +++ b/sql/resource_query.go @@ -32,9 +32,7 @@ func (QueryStruct) CustomizeSchema(m *common.CustomizableSchema) *common.Customi m.SchemaPath("warehouse_id").SetRequired().SetValidateFunc(validation.StringIsNotWhiteSpace) m.SchemaPath("parent_path").SetCustomSuppressDiff(common.WorkspaceOrEmptyPathPrefixDiffSuppress).SetForceNew() m.SchemaPath("owner_user_name").SetSuppressDiff() - m.SchemaPath("run_as_mode").SetSuppressDiff() - //m.SchemaPath("").SetSuppressDiff() - //m.SchemaPath("").SetSuppressDiff() + m.SchemaPath("run_as_mode").SetSuppressDiff().SetValidateFunc(validation.StringInSlice([]string{"VIEWER", "OWNER"}, false)) m.SchemaPath("id").SetReadOnly() m.SchemaPath("create_time").SetReadOnly() m.SchemaPath("lifecycle_state").SetReadOnly()