diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 2d9cb6d86..ecf041814 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -cf9c61453990df0f9453670f2fe68e1b128647a2 \ No newline at end of file +25b2478e5a18c888f0d423249abde5499dc58424 \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 25cefe6cf..e294157b7 100644 --- a/.gitattributes +++ b/.gitattributes @@ -95,6 +95,7 @@ experimental/mocks/service/settings/mock_credentials_manager_interface.go lingui experimental/mocks/service/settings/mock_csp_enablement_account_interface.go linguist-generated=true experimental/mocks/service/settings/mock_default_namespace_interface.go linguist-generated=true experimental/mocks/service/settings/mock_disable_legacy_access_interface.go linguist-generated=true +experimental/mocks/service/settings/mock_disable_legacy_dbfs_interface.go linguist-generated=true experimental/mocks/service/settings/mock_disable_legacy_features_interface.go linguist-generated=true experimental/mocks/service/settings/mock_enhanced_security_monitoring_interface.go linguist-generated=true experimental/mocks/service/settings/mock_esm_enablement_account_interface.go linguist-generated=true diff --git a/CHANGELOG.md b/CHANGELOG.md index fad77f4ed..0556f3cbf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,50 @@ # Version changelog +## [Release] Release v0.50.0 + +### Internal Changes + + * Add DCO guidelines ([#1047](https://github.com/databricks/databricks-sdk-go/pull/1047)). + * Add test instructions for external contributors ([#1073](https://github.com/databricks/databricks-sdk-go/pull/1073)). + * Automatically trigger integration tests ([#1067](https://github.com/databricks/databricks-sdk-go/pull/1067)). + * Move templates in the code generator ([#1075](https://github.com/databricks/databricks-sdk-go/pull/1075)). + * Remove unnecessary test ([#1071](https://github.com/databricks/databricks-sdk-go/pull/1071)). + + +### API Changes: + + * Added [w.AibiDashboardEmbeddingAccessPolicy](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#AibiDashboardEmbeddingAccessPolicyAPI) workspace-level service and [w.AibiDashboardEmbeddingApprovedDomains](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#AibiDashboardEmbeddingApprovedDomainsAPI) workspace-level service. + * Added `AppDeployment` field for [apps.CreateAppDeploymentRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/apps#CreateAppDeploymentRequest). + * Added `App` field for [apps.CreateAppRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/apps#CreateAppRequest). + * Added `App` field for [apps.UpdateAppRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/apps#UpdateAppRequest). + * Added `Table` field for [catalog.CreateOnlineTableRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#CreateOnlineTableRequest). + * Added `Dashboard` field for [dashboards.CreateDashboardRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#CreateDashboardRequest). + * Added `Schedule` field for [dashboards.CreateScheduleRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#CreateScheduleRequest). + * Added `Subscription` field for [dashboards.CreateSubscriptionRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#CreateSubscriptionRequest). 
+ * Added `Dashboard` field for [dashboards.UpdateDashboardRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#UpdateDashboardRequest). + * Added `Schedule` field for [dashboards.UpdateScheduleRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#UpdateScheduleRequest). + * Added `PageToken` field for [oauth2.ListServicePrincipalSecretsRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#ListServicePrincipalSecretsRequest). + * Added `NextPageToken` field for [oauth2.ListServicePrincipalSecretsResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#ListServicePrincipalSecretsResponse). + * Added `IsNoPublicIpEnabled` field for [provisioning.CreateWorkspaceRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/provisioning#CreateWorkspaceRequest). + * Added `ExternalCustomerInfo` and `IsNoPublicIpEnabled` fields for [provisioning.Workspace](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/provisioning#Workspace). + * Added `LastUsedDay` field for [settings.TokenInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#TokenInfo). + * Changed `Create` method for [w.Apps](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/apps#AppsAPI) workspace-level service with new required argument order. + * Changed `ExecuteMessageQuery` method for [w.Genie](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI) workspace-level service. New request type is [dashboards.GenieExecuteMessageQueryRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieExecuteMessageQueryRequest). + * Changed `Create`, `CreateSchedule`, `CreateSubscription` and `UpdateSchedule` methods for [w.Lakeview](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#LakeviewAPI) workspace-level service with new required argument order. + * Removed `DeploymentId`, `Mode` and `SourceCodePath` fields for [apps.CreateAppDeploymentRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/apps#CreateAppDeploymentRequest). + * Removed `Description`, `Name` and `Resources` fields for [apps.CreateAppRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/apps#CreateAppRequest). + * Removed `Description` and `Resources` fields for [apps.UpdateAppRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/apps#UpdateAppRequest). + * Removed `Name` and `Spec` fields for [catalog.CreateOnlineTableRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#CreateOnlineTableRequest). + * Removed `DisplayName`, `ParentPath`, `SerializedDashboard` and `WarehouseId` fields for [dashboards.CreateDashboardRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#CreateDashboardRequest). + * Removed `CronSchedule`, `DisplayName` and `PauseStatus` fields for [dashboards.CreateScheduleRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#CreateScheduleRequest). 
+ * Removed `Subscriber` field for [dashboards.CreateSubscriptionRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#CreateSubscriptionRequest). + * Removed `DisplayName`, `Etag`, `SerializedDashboard` and `WarehouseId` fields for [dashboards.UpdateDashboardRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#UpdateDashboardRequest). + * Removed `CronSchedule`, `DisplayName`, `Etag` and `PauseStatus` fields for [dashboards.UpdateScheduleRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#UpdateScheduleRequest). + * Removed `PrevPageToken` field for [jobs.Run](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#Run). + +OpenAPI SHA: 25b2478e5a18c888f0d423249abde5499dc58424, Date: 2024-10-31 + ## [Release] Release v0.49.0 ### API Changes: diff --git a/experimental/mocks/mock_workspace_client.go b/experimental/mocks/mock_workspace_client.go index a4a85a71c..a37a66426 100755 --- a/experimental/mocks/mock_workspace_client.go +++ b/experimental/mocks/mock_workspace_client.go @@ -136,6 +136,12 @@ func NewMockWorkspaceClient(t interface { mocksettingsAPI := cli.GetMockSettingsAPI() + mockaibiDashboardEmbeddingAccessPolicy := settings.NewMockAibiDashboardEmbeddingAccessPolicyInterface(t) + mocksettingsAPI.On("AibiDashboardEmbeddingAccessPolicy").Return(mockaibiDashboardEmbeddingAccessPolicy).Maybe() + + mockaibiDashboardEmbeddingApprovedDomains := settings.NewMockAibiDashboardEmbeddingApprovedDomainsInterface(t) + mocksettingsAPI.On("AibiDashboardEmbeddingApprovedDomains").Return(mockaibiDashboardEmbeddingApprovedDomains).Maybe() + mockautomaticClusterUpdate := settings.NewMockAutomaticClusterUpdateInterface(t) mocksettingsAPI.On("AutomaticClusterUpdate").Return(mockautomaticClusterUpdate).Maybe() @@ -160,6 +166,22 @@ func NewMockWorkspaceClient(t interface { return cli } +func (m *MockWorkspaceClient) GetMockAibiDashboardEmbeddingAccessPolicyAPI() *settings.MockAibiDashboardEmbeddingAccessPolicyInterface { + api, ok := m.GetMockSettingsAPI().AibiDashboardEmbeddingAccessPolicy().(*settings.MockAibiDashboardEmbeddingAccessPolicyInterface) + if !ok { + panic(fmt.Sprintf("expected AibiDashboardEmbeddingAccessPolicy to be *settings.MockAibiDashboardEmbeddingAccessPolicyInterface, actual was %T", m.GetMockSettingsAPI().AibiDashboardEmbeddingAccessPolicy())) + } + return api +} + +func (m *MockWorkspaceClient) GetMockAibiDashboardEmbeddingApprovedDomainsAPI() *settings.MockAibiDashboardEmbeddingApprovedDomainsInterface { + api, ok := m.GetMockSettingsAPI().AibiDashboardEmbeddingApprovedDomains().(*settings.MockAibiDashboardEmbeddingApprovedDomainsInterface) + if !ok { + panic(fmt.Sprintf("expected AibiDashboardEmbeddingApprovedDomains to be *settings.MockAibiDashboardEmbeddingApprovedDomainsInterface, actual was %T", m.GetMockSettingsAPI().AibiDashboardEmbeddingApprovedDomains())) + } + return api +} + func (m *MockWorkspaceClient) GetMockAutomaticClusterUpdateAPI() *settings.MockAutomaticClusterUpdateInterface { api, ok := m.GetMockSettingsAPI().AutomaticClusterUpdate().(*settings.MockAutomaticClusterUpdateInterface) if !ok { diff --git a/experimental/mocks/service/catalog/mock_online_tables_interface.go b/experimental/mocks/service/catalog/mock_online_tables_interface.go index 39fb1c597..d49243937 100644 --- a/experimental/mocks/service/catalog/mock_online_tables_interface.go +++ b/experimental/mocks/service/catalog/mock_online_tables_interface.go @@ -8,6 +8,10 @@ import ( catalog 
"github.com/databricks/databricks-sdk-go/service/catalog" mock "github.com/stretchr/testify/mock" + + retries "github.com/databricks/databricks-sdk-go/retries" + + time "time" ) // MockOnlineTablesInterface is an autogenerated mock type for the OnlineTablesInterface type @@ -23,29 +27,29 @@ func (_m *MockOnlineTablesInterface) EXPECT() *MockOnlineTablesInterface_Expecte return &MockOnlineTablesInterface_Expecter{mock: &_m.Mock} } -// Create provides a mock function with given fields: ctx, request -func (_m *MockOnlineTablesInterface) Create(ctx context.Context, request catalog.CreateOnlineTableRequest) (*catalog.OnlineTable, error) { - ret := _m.Called(ctx, request) +// Create provides a mock function with given fields: ctx, createOnlineTableRequest +func (_m *MockOnlineTablesInterface) Create(ctx context.Context, createOnlineTableRequest catalog.CreateOnlineTableRequest) (*catalog.WaitGetOnlineTableActive[catalog.OnlineTable], error) { + ret := _m.Called(ctx, createOnlineTableRequest) if len(ret) == 0 { panic("no return value specified for Create") } - var r0 *catalog.OnlineTable + var r0 *catalog.WaitGetOnlineTableActive[catalog.OnlineTable] var r1 error - if rf, ok := ret.Get(0).(func(context.Context, catalog.CreateOnlineTableRequest) (*catalog.OnlineTable, error)); ok { - return rf(ctx, request) + if rf, ok := ret.Get(0).(func(context.Context, catalog.CreateOnlineTableRequest) (*catalog.WaitGetOnlineTableActive[catalog.OnlineTable], error)); ok { + return rf(ctx, createOnlineTableRequest) } - if rf, ok := ret.Get(0).(func(context.Context, catalog.CreateOnlineTableRequest) *catalog.OnlineTable); ok { - r0 = rf(ctx, request) + if rf, ok := ret.Get(0).(func(context.Context, catalog.CreateOnlineTableRequest) *catalog.WaitGetOnlineTableActive[catalog.OnlineTable]); ok { + r0 = rf(ctx, createOnlineTableRequest) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*catalog.OnlineTable) + r0 = ret.Get(0).(*catalog.WaitGetOnlineTableActive[catalog.OnlineTable]) } } if rf, ok := ret.Get(1).(func(context.Context, catalog.CreateOnlineTableRequest) error); ok { - r1 = rf(ctx, request) + r1 = rf(ctx, createOnlineTableRequest) } else { r1 = ret.Error(1) } @@ -60,24 +64,98 @@ type MockOnlineTablesInterface_Create_Call struct { // Create is a helper method to define mock.On call // - ctx context.Context -// - request catalog.CreateOnlineTableRequest -func (_e *MockOnlineTablesInterface_Expecter) Create(ctx interface{}, request interface{}) *MockOnlineTablesInterface_Create_Call { - return &MockOnlineTablesInterface_Create_Call{Call: _e.mock.On("Create", ctx, request)} +// - createOnlineTableRequest catalog.CreateOnlineTableRequest +func (_e *MockOnlineTablesInterface_Expecter) Create(ctx interface{}, createOnlineTableRequest interface{}) *MockOnlineTablesInterface_Create_Call { + return &MockOnlineTablesInterface_Create_Call{Call: _e.mock.On("Create", ctx, createOnlineTableRequest)} } -func (_c *MockOnlineTablesInterface_Create_Call) Run(run func(ctx context.Context, request catalog.CreateOnlineTableRequest)) *MockOnlineTablesInterface_Create_Call { +func (_c *MockOnlineTablesInterface_Create_Call) Run(run func(ctx context.Context, createOnlineTableRequest catalog.CreateOnlineTableRequest)) *MockOnlineTablesInterface_Create_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(context.Context), args[1].(catalog.CreateOnlineTableRequest)) }) return _c } -func (_c *MockOnlineTablesInterface_Create_Call) Return(_a0 *catalog.OnlineTable, _a1 error) *MockOnlineTablesInterface_Create_Call { +func (_c 
*MockOnlineTablesInterface_Create_Call) Return(_a0 *catalog.WaitGetOnlineTableActive[catalog.OnlineTable], _a1 error) *MockOnlineTablesInterface_Create_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *MockOnlineTablesInterface_Create_Call) RunAndReturn(run func(context.Context, catalog.CreateOnlineTableRequest) (*catalog.OnlineTable, error)) *MockOnlineTablesInterface_Create_Call { +func (_c *MockOnlineTablesInterface_Create_Call) RunAndReturn(run func(context.Context, catalog.CreateOnlineTableRequest) (*catalog.WaitGetOnlineTableActive[catalog.OnlineTable], error)) *MockOnlineTablesInterface_Create_Call { + _c.Call.Return(run) + return _c +} + +// CreateAndWait provides a mock function with given fields: ctx, createOnlineTableRequest, options +func (_m *MockOnlineTablesInterface) CreateAndWait(ctx context.Context, createOnlineTableRequest catalog.CreateOnlineTableRequest, options ...retries.Option[catalog.OnlineTable]) (*catalog.OnlineTable, error) { + _va := make([]interface{}, len(options)) + for _i := range options { + _va[_i] = options[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, createOnlineTableRequest) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for CreateAndWait") + } + + var r0 *catalog.OnlineTable + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, catalog.CreateOnlineTableRequest, ...retries.Option[catalog.OnlineTable]) (*catalog.OnlineTable, error)); ok { + return rf(ctx, createOnlineTableRequest, options...) + } + if rf, ok := ret.Get(0).(func(context.Context, catalog.CreateOnlineTableRequest, ...retries.Option[catalog.OnlineTable]) *catalog.OnlineTable); ok { + r0 = rf(ctx, createOnlineTableRequest, options...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*catalog.OnlineTable) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, catalog.CreateOnlineTableRequest, ...retries.Option[catalog.OnlineTable]) error); ok { + r1 = rf(ctx, createOnlineTableRequest, options...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockOnlineTablesInterface_CreateAndWait_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateAndWait' +type MockOnlineTablesInterface_CreateAndWait_Call struct { + *mock.Call +} + +// CreateAndWait is a helper method to define mock.On call +// - ctx context.Context +// - createOnlineTableRequest catalog.CreateOnlineTableRequest +// - options ...retries.Option[catalog.OnlineTable] +func (_e *MockOnlineTablesInterface_Expecter) CreateAndWait(ctx interface{}, createOnlineTableRequest interface{}, options ...interface{}) *MockOnlineTablesInterface_CreateAndWait_Call { + return &MockOnlineTablesInterface_CreateAndWait_Call{Call: _e.mock.On("CreateAndWait", + append([]interface{}{ctx, createOnlineTableRequest}, options...)...)} +} + +func (_c *MockOnlineTablesInterface_CreateAndWait_Call) Run(run func(ctx context.Context, createOnlineTableRequest catalog.CreateOnlineTableRequest, options ...retries.Option[catalog.OnlineTable])) *MockOnlineTablesInterface_CreateAndWait_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]retries.Option[catalog.OnlineTable], len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(retries.Option[catalog.OnlineTable]) + } + } + run(args[0].(context.Context), args[1].(catalog.CreateOnlineTableRequest), variadicArgs...) 
+ }) + return _c +} + +func (_c *MockOnlineTablesInterface_CreateAndWait_Call) Return(_a0 *catalog.OnlineTable, _a1 error) *MockOnlineTablesInterface_CreateAndWait_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockOnlineTablesInterface_CreateAndWait_Call) RunAndReturn(run func(context.Context, catalog.CreateOnlineTableRequest, ...retries.Option[catalog.OnlineTable]) (*catalog.OnlineTable, error)) *MockOnlineTablesInterface_CreateAndWait_Call { _c.Call.Return(run) return _c } @@ -294,6 +372,67 @@ func (_c *MockOnlineTablesInterface_GetByName_Call) RunAndReturn(run func(contex return _c } +// WaitGetOnlineTableActive provides a mock function with given fields: ctx, name, timeout, callback +func (_m *MockOnlineTablesInterface) WaitGetOnlineTableActive(ctx context.Context, name string, timeout time.Duration, callback func(*catalog.OnlineTable)) (*catalog.OnlineTable, error) { + ret := _m.Called(ctx, name, timeout, callback) + + if len(ret) == 0 { + panic("no return value specified for WaitGetOnlineTableActive") + } + + var r0 *catalog.OnlineTable + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, time.Duration, func(*catalog.OnlineTable)) (*catalog.OnlineTable, error)); ok { + return rf(ctx, name, timeout, callback) + } + if rf, ok := ret.Get(0).(func(context.Context, string, time.Duration, func(*catalog.OnlineTable)) *catalog.OnlineTable); ok { + r0 = rf(ctx, name, timeout, callback) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*catalog.OnlineTable) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, time.Duration, func(*catalog.OnlineTable)) error); ok { + r1 = rf(ctx, name, timeout, callback) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockOnlineTablesInterface_WaitGetOnlineTableActive_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WaitGetOnlineTableActive' +type MockOnlineTablesInterface_WaitGetOnlineTableActive_Call struct { + *mock.Call +} + +// WaitGetOnlineTableActive is a helper method to define mock.On call +// - ctx context.Context +// - name string +// - timeout time.Duration +// - callback func(*catalog.OnlineTable) +func (_e *MockOnlineTablesInterface_Expecter) WaitGetOnlineTableActive(ctx interface{}, name interface{}, timeout interface{}, callback interface{}) *MockOnlineTablesInterface_WaitGetOnlineTableActive_Call { + return &MockOnlineTablesInterface_WaitGetOnlineTableActive_Call{Call: _e.mock.On("WaitGetOnlineTableActive", ctx, name, timeout, callback)} +} + +func (_c *MockOnlineTablesInterface_WaitGetOnlineTableActive_Call) Run(run func(ctx context.Context, name string, timeout time.Duration, callback func(*catalog.OnlineTable))) *MockOnlineTablesInterface_WaitGetOnlineTableActive_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(time.Duration), args[3].(func(*catalog.OnlineTable))) + }) + return _c +} + +func (_c *MockOnlineTablesInterface_WaitGetOnlineTableActive_Call) Return(_a0 *catalog.OnlineTable, _a1 error) *MockOnlineTablesInterface_WaitGetOnlineTableActive_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockOnlineTablesInterface_WaitGetOnlineTableActive_Call) RunAndReturn(run func(context.Context, string, time.Duration, func(*catalog.OnlineTable)) (*catalog.OnlineTable, error)) *MockOnlineTablesInterface_WaitGetOnlineTableActive_Call { + _c.Call.Return(run) + return _c +} + // NewMockOnlineTablesInterface creates a new instance of MockOnlineTablesInterface. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func NewMockOnlineTablesInterface(t interface { diff --git a/experimental/mocks/service/dashboards/mock_genie_interface.go b/experimental/mocks/service/dashboards/mock_genie_interface.go index f04a05948..e943c8b10 100644 --- a/experimental/mocks/service/dashboards/mock_genie_interface.go +++ b/experimental/mocks/service/dashboards/mock_genie_interface.go @@ -160,7 +160,7 @@ func (_c *MockGenieInterface_CreateMessageAndWait_Call) RunAndReturn(run func(co } // ExecuteMessageQuery provides a mock function with given fields: ctx, request -func (_m *MockGenieInterface) ExecuteMessageQuery(ctx context.Context, request dashboards.ExecuteMessageQueryRequest) (*dashboards.GenieGetMessageQueryResultResponse, error) { +func (_m *MockGenieInterface) ExecuteMessageQuery(ctx context.Context, request dashboards.GenieExecuteMessageQueryRequest) (*dashboards.GenieGetMessageQueryResultResponse, error) { ret := _m.Called(ctx, request) if len(ret) == 0 { @@ -169,10 +169,10 @@ func (_m *MockGenieInterface) ExecuteMessageQuery(ctx context.Context, request d var r0 *dashboards.GenieGetMessageQueryResultResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, dashboards.ExecuteMessageQueryRequest) (*dashboards.GenieGetMessageQueryResultResponse, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, dashboards.GenieExecuteMessageQueryRequest) (*dashboards.GenieGetMessageQueryResultResponse, error)); ok { return rf(ctx, request) } - if rf, ok := ret.Get(0).(func(context.Context, dashboards.ExecuteMessageQueryRequest) *dashboards.GenieGetMessageQueryResultResponse); ok { + if rf, ok := ret.Get(0).(func(context.Context, dashboards.GenieExecuteMessageQueryRequest) *dashboards.GenieGetMessageQueryResultResponse); ok { r0 = rf(ctx, request) } else { if ret.Get(0) != nil { @@ -180,7 +180,7 @@ func (_m *MockGenieInterface) ExecuteMessageQuery(ctx context.Context, request d } } - if rf, ok := ret.Get(1).(func(context.Context, dashboards.ExecuteMessageQueryRequest) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, dashboards.GenieExecuteMessageQueryRequest) error); ok { r1 = rf(ctx, request) } else { r1 = ret.Error(1) @@ -196,14 +196,14 @@ type MockGenieInterface_ExecuteMessageQuery_Call struct { // ExecuteMessageQuery is a helper method to define mock.On call // - ctx context.Context -// - request dashboards.ExecuteMessageQueryRequest +// - request dashboards.GenieExecuteMessageQueryRequest func (_e *MockGenieInterface_Expecter) ExecuteMessageQuery(ctx interface{}, request interface{}) *MockGenieInterface_ExecuteMessageQuery_Call { return &MockGenieInterface_ExecuteMessageQuery_Call{Call: _e.mock.On("ExecuteMessageQuery", ctx, request)} } -func (_c *MockGenieInterface_ExecuteMessageQuery_Call) Run(run func(ctx context.Context, request dashboards.ExecuteMessageQueryRequest)) *MockGenieInterface_ExecuteMessageQuery_Call { +func (_c *MockGenieInterface_ExecuteMessageQuery_Call) Run(run func(ctx context.Context, request dashboards.GenieExecuteMessageQueryRequest)) *MockGenieInterface_ExecuteMessageQuery_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(dashboards.ExecuteMessageQueryRequest)) + run(args[0].(context.Context), args[1].(dashboards.GenieExecuteMessageQueryRequest)) }) return _c } @@ -213,7 +213,7 @@ func (_c *MockGenieInterface_ExecuteMessageQuery_Call) Return(_a0 *dashboards.Ge 
return _c } -func (_c *MockGenieInterface_ExecuteMessageQuery_Call) RunAndReturn(run func(context.Context, dashboards.ExecuteMessageQueryRequest) (*dashboards.GenieGetMessageQueryResultResponse, error)) *MockGenieInterface_ExecuteMessageQuery_Call { +func (_c *MockGenieInterface_ExecuteMessageQuery_Call) RunAndReturn(run func(context.Context, dashboards.GenieExecuteMessageQueryRequest) (*dashboards.GenieGetMessageQueryResultResponse, error)) *MockGenieInterface_ExecuteMessageQuery_Call { _c.Call.Return(run) return _c } diff --git a/experimental/mocks/service/settings/mock_aibi_dashboard_embedding_access_policy_interface.go b/experimental/mocks/service/settings/mock_aibi_dashboard_embedding_access_policy_interface.go new file mode 100644 index 000000000..15fb38ac1 --- /dev/null +++ b/experimental/mocks/service/settings/mock_aibi_dashboard_embedding_access_policy_interface.go @@ -0,0 +1,155 @@ +// Code generated by mockery v2.43.0. DO NOT EDIT. + +package settings + +import ( + context "context" + + settings "github.com/databricks/databricks-sdk-go/service/settings" + mock "github.com/stretchr/testify/mock" +) + +// MockAibiDashboardEmbeddingAccessPolicyInterface is an autogenerated mock type for the AibiDashboardEmbeddingAccessPolicyInterface type +type MockAibiDashboardEmbeddingAccessPolicyInterface struct { + mock.Mock +} + +type MockAibiDashboardEmbeddingAccessPolicyInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *MockAibiDashboardEmbeddingAccessPolicyInterface) EXPECT() *MockAibiDashboardEmbeddingAccessPolicyInterface_Expecter { + return &MockAibiDashboardEmbeddingAccessPolicyInterface_Expecter{mock: &_m.Mock} +} + +// Get provides a mock function with given fields: ctx, request +func (_m *MockAibiDashboardEmbeddingAccessPolicyInterface) Get(ctx context.Context, request settings.GetAibiDashboardEmbeddingAccessPolicySettingRequest) (*settings.AibiDashboardEmbeddingAccessPolicySetting, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 *settings.AibiDashboardEmbeddingAccessPolicySetting + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, settings.GetAibiDashboardEmbeddingAccessPolicySettingRequest) (*settings.AibiDashboardEmbeddingAccessPolicySetting, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, settings.GetAibiDashboardEmbeddingAccessPolicySettingRequest) *settings.AibiDashboardEmbeddingAccessPolicySetting); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*settings.AibiDashboardEmbeddingAccessPolicySetting) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, settings.GetAibiDashboardEmbeddingAccessPolicySettingRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockAibiDashboardEmbeddingAccessPolicyInterface_Get_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Get' +type MockAibiDashboardEmbeddingAccessPolicyInterface_Get_Call struct { + *mock.Call +} + +// Get is a helper method to define mock.On call +// - ctx context.Context +// - request settings.GetAibiDashboardEmbeddingAccessPolicySettingRequest +func (_e *MockAibiDashboardEmbeddingAccessPolicyInterface_Expecter) Get(ctx interface{}, request interface{}) *MockAibiDashboardEmbeddingAccessPolicyInterface_Get_Call { + return &MockAibiDashboardEmbeddingAccessPolicyInterface_Get_Call{Call: _e.mock.On("Get", ctx, request)} +} + +func (_c 
*MockAibiDashboardEmbeddingAccessPolicyInterface_Get_Call) Run(run func(ctx context.Context, request settings.GetAibiDashboardEmbeddingAccessPolicySettingRequest)) *MockAibiDashboardEmbeddingAccessPolicyInterface_Get_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(settings.GetAibiDashboardEmbeddingAccessPolicySettingRequest)) + }) + return _c +} + +func (_c *MockAibiDashboardEmbeddingAccessPolicyInterface_Get_Call) Return(_a0 *settings.AibiDashboardEmbeddingAccessPolicySetting, _a1 error) *MockAibiDashboardEmbeddingAccessPolicyInterface_Get_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockAibiDashboardEmbeddingAccessPolicyInterface_Get_Call) RunAndReturn(run func(context.Context, settings.GetAibiDashboardEmbeddingAccessPolicySettingRequest) (*settings.AibiDashboardEmbeddingAccessPolicySetting, error)) *MockAibiDashboardEmbeddingAccessPolicyInterface_Get_Call { + _c.Call.Return(run) + return _c +} + +// Update provides a mock function with given fields: ctx, request +func (_m *MockAibiDashboardEmbeddingAccessPolicyInterface) Update(ctx context.Context, request settings.UpdateAibiDashboardEmbeddingAccessPolicySettingRequest) (*settings.AibiDashboardEmbeddingAccessPolicySetting, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for Update") + } + + var r0 *settings.AibiDashboardEmbeddingAccessPolicySetting + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, settings.UpdateAibiDashboardEmbeddingAccessPolicySettingRequest) (*settings.AibiDashboardEmbeddingAccessPolicySetting, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, settings.UpdateAibiDashboardEmbeddingAccessPolicySettingRequest) *settings.AibiDashboardEmbeddingAccessPolicySetting); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*settings.AibiDashboardEmbeddingAccessPolicySetting) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, settings.UpdateAibiDashboardEmbeddingAccessPolicySettingRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockAibiDashboardEmbeddingAccessPolicyInterface_Update_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Update' +type MockAibiDashboardEmbeddingAccessPolicyInterface_Update_Call struct { + *mock.Call +} + +// Update is a helper method to define mock.On call +// - ctx context.Context +// - request settings.UpdateAibiDashboardEmbeddingAccessPolicySettingRequest +func (_e *MockAibiDashboardEmbeddingAccessPolicyInterface_Expecter) Update(ctx interface{}, request interface{}) *MockAibiDashboardEmbeddingAccessPolicyInterface_Update_Call { + return &MockAibiDashboardEmbeddingAccessPolicyInterface_Update_Call{Call: _e.mock.On("Update", ctx, request)} +} + +func (_c *MockAibiDashboardEmbeddingAccessPolicyInterface_Update_Call) Run(run func(ctx context.Context, request settings.UpdateAibiDashboardEmbeddingAccessPolicySettingRequest)) *MockAibiDashboardEmbeddingAccessPolicyInterface_Update_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(settings.UpdateAibiDashboardEmbeddingAccessPolicySettingRequest)) + }) + return _c +} + +func (_c *MockAibiDashboardEmbeddingAccessPolicyInterface_Update_Call) Return(_a0 *settings.AibiDashboardEmbeddingAccessPolicySetting, _a1 error) *MockAibiDashboardEmbeddingAccessPolicyInterface_Update_Call { + _c.Call.Return(_a0, _a1) + return _c 
+} + +func (_c *MockAibiDashboardEmbeddingAccessPolicyInterface_Update_Call) RunAndReturn(run func(context.Context, settings.UpdateAibiDashboardEmbeddingAccessPolicySettingRequest) (*settings.AibiDashboardEmbeddingAccessPolicySetting, error)) *MockAibiDashboardEmbeddingAccessPolicyInterface_Update_Call { + _c.Call.Return(run) + return _c +} + +// NewMockAibiDashboardEmbeddingAccessPolicyInterface creates a new instance of MockAibiDashboardEmbeddingAccessPolicyInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockAibiDashboardEmbeddingAccessPolicyInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *MockAibiDashboardEmbeddingAccessPolicyInterface { + mock := &MockAibiDashboardEmbeddingAccessPolicyInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/experimental/mocks/service/settings/mock_aibi_dashboard_embedding_approved_domains_interface.go b/experimental/mocks/service/settings/mock_aibi_dashboard_embedding_approved_domains_interface.go new file mode 100644 index 000000000..aaeeeba9a --- /dev/null +++ b/experimental/mocks/service/settings/mock_aibi_dashboard_embedding_approved_domains_interface.go @@ -0,0 +1,155 @@ +// Code generated by mockery v2.43.0. DO NOT EDIT. + +package settings + +import ( + context "context" + + settings "github.com/databricks/databricks-sdk-go/service/settings" + mock "github.com/stretchr/testify/mock" +) + +// MockAibiDashboardEmbeddingApprovedDomainsInterface is an autogenerated mock type for the AibiDashboardEmbeddingApprovedDomainsInterface type +type MockAibiDashboardEmbeddingApprovedDomainsInterface struct { + mock.Mock +} + +type MockAibiDashboardEmbeddingApprovedDomainsInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *MockAibiDashboardEmbeddingApprovedDomainsInterface) EXPECT() *MockAibiDashboardEmbeddingApprovedDomainsInterface_Expecter { + return &MockAibiDashboardEmbeddingApprovedDomainsInterface_Expecter{mock: &_m.Mock} +} + +// Get provides a mock function with given fields: ctx, request +func (_m *MockAibiDashboardEmbeddingApprovedDomainsInterface) Get(ctx context.Context, request settings.GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*settings.AibiDashboardEmbeddingApprovedDomainsSetting, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 *settings.AibiDashboardEmbeddingApprovedDomainsSetting + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, settings.GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*settings.AibiDashboardEmbeddingApprovedDomainsSetting, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, settings.GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) *settings.AibiDashboardEmbeddingApprovedDomainsSetting); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*settings.AibiDashboardEmbeddingApprovedDomainsSetting) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, settings.GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockAibiDashboardEmbeddingApprovedDomainsInterface_Get_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Get' +type 
MockAibiDashboardEmbeddingApprovedDomainsInterface_Get_Call struct { + *mock.Call +} + +// Get is a helper method to define mock.On call +// - ctx context.Context +// - request settings.GetAibiDashboardEmbeddingApprovedDomainsSettingRequest +func (_e *MockAibiDashboardEmbeddingApprovedDomainsInterface_Expecter) Get(ctx interface{}, request interface{}) *MockAibiDashboardEmbeddingApprovedDomainsInterface_Get_Call { + return &MockAibiDashboardEmbeddingApprovedDomainsInterface_Get_Call{Call: _e.mock.On("Get", ctx, request)} +} + +func (_c *MockAibiDashboardEmbeddingApprovedDomainsInterface_Get_Call) Run(run func(ctx context.Context, request settings.GetAibiDashboardEmbeddingApprovedDomainsSettingRequest)) *MockAibiDashboardEmbeddingApprovedDomainsInterface_Get_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(settings.GetAibiDashboardEmbeddingApprovedDomainsSettingRequest)) + }) + return _c +} + +func (_c *MockAibiDashboardEmbeddingApprovedDomainsInterface_Get_Call) Return(_a0 *settings.AibiDashboardEmbeddingApprovedDomainsSetting, _a1 error) *MockAibiDashboardEmbeddingApprovedDomainsInterface_Get_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockAibiDashboardEmbeddingApprovedDomainsInterface_Get_Call) RunAndReturn(run func(context.Context, settings.GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*settings.AibiDashboardEmbeddingApprovedDomainsSetting, error)) *MockAibiDashboardEmbeddingApprovedDomainsInterface_Get_Call { + _c.Call.Return(run) + return _c +} + +// Update provides a mock function with given fields: ctx, request +func (_m *MockAibiDashboardEmbeddingApprovedDomainsInterface) Update(ctx context.Context, request settings.UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*settings.AibiDashboardEmbeddingApprovedDomainsSetting, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for Update") + } + + var r0 *settings.AibiDashboardEmbeddingApprovedDomainsSetting + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, settings.UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*settings.AibiDashboardEmbeddingApprovedDomainsSetting, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, settings.UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest) *settings.AibiDashboardEmbeddingApprovedDomainsSetting); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*settings.AibiDashboardEmbeddingApprovedDomainsSetting) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, settings.UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockAibiDashboardEmbeddingApprovedDomainsInterface_Update_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Update' +type MockAibiDashboardEmbeddingApprovedDomainsInterface_Update_Call struct { + *mock.Call +} + +// Update is a helper method to define mock.On call +// - ctx context.Context +// - request settings.UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest +func (_e *MockAibiDashboardEmbeddingApprovedDomainsInterface_Expecter) Update(ctx interface{}, request interface{}) *MockAibiDashboardEmbeddingApprovedDomainsInterface_Update_Call { + return &MockAibiDashboardEmbeddingApprovedDomainsInterface_Update_Call{Call: _e.mock.On("Update", ctx, request)} +} + +func (_c 
*MockAibiDashboardEmbeddingApprovedDomainsInterface_Update_Call) Run(run func(ctx context.Context, request settings.UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest)) *MockAibiDashboardEmbeddingApprovedDomainsInterface_Update_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(settings.UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest)) + }) + return _c +} + +func (_c *MockAibiDashboardEmbeddingApprovedDomainsInterface_Update_Call) Return(_a0 *settings.AibiDashboardEmbeddingApprovedDomainsSetting, _a1 error) *MockAibiDashboardEmbeddingApprovedDomainsInterface_Update_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockAibiDashboardEmbeddingApprovedDomainsInterface_Update_Call) RunAndReturn(run func(context.Context, settings.UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*settings.AibiDashboardEmbeddingApprovedDomainsSetting, error)) *MockAibiDashboardEmbeddingApprovedDomainsInterface_Update_Call { + _c.Call.Return(run) + return _c +} + +// NewMockAibiDashboardEmbeddingApprovedDomainsInterface creates a new instance of MockAibiDashboardEmbeddingApprovedDomainsInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockAibiDashboardEmbeddingApprovedDomainsInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *MockAibiDashboardEmbeddingApprovedDomainsInterface { + mock := &MockAibiDashboardEmbeddingApprovedDomainsInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/experimental/mocks/service/settings/mock_settings_interface.go b/experimental/mocks/service/settings/mock_settings_interface.go index 7429b2e45..0e4a6a17a 100644 --- a/experimental/mocks/service/settings/mock_settings_interface.go +++ b/experimental/mocks/service/settings/mock_settings_interface.go @@ -20,6 +20,100 @@ func (_m *MockSettingsInterface) EXPECT() *MockSettingsInterface_Expecter { return &MockSettingsInterface_Expecter{mock: &_m.Mock} } +// AibiDashboardEmbeddingAccessPolicy provides a mock function with given fields: +func (_m *MockSettingsInterface) AibiDashboardEmbeddingAccessPolicy() settings.AibiDashboardEmbeddingAccessPolicyInterface { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for AibiDashboardEmbeddingAccessPolicy") + } + + var r0 settings.AibiDashboardEmbeddingAccessPolicyInterface + if rf, ok := ret.Get(0).(func() settings.AibiDashboardEmbeddingAccessPolicyInterface); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(settings.AibiDashboardEmbeddingAccessPolicyInterface) + } + } + + return r0 +} + +// MockSettingsInterface_AibiDashboardEmbeddingAccessPolicy_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AibiDashboardEmbeddingAccessPolicy' +type MockSettingsInterface_AibiDashboardEmbeddingAccessPolicy_Call struct { + *mock.Call +} + +// AibiDashboardEmbeddingAccessPolicy is a helper method to define mock.On call +func (_e *MockSettingsInterface_Expecter) AibiDashboardEmbeddingAccessPolicy() *MockSettingsInterface_AibiDashboardEmbeddingAccessPolicy_Call { + return &MockSettingsInterface_AibiDashboardEmbeddingAccessPolicy_Call{Call: _e.mock.On("AibiDashboardEmbeddingAccessPolicy")} +} + +func (_c *MockSettingsInterface_AibiDashboardEmbeddingAccessPolicy_Call) Run(run func()) *MockSettingsInterface_AibiDashboardEmbeddingAccessPolicy_Call 
{ + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockSettingsInterface_AibiDashboardEmbeddingAccessPolicy_Call) Return(_a0 settings.AibiDashboardEmbeddingAccessPolicyInterface) *MockSettingsInterface_AibiDashboardEmbeddingAccessPolicy_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockSettingsInterface_AibiDashboardEmbeddingAccessPolicy_Call) RunAndReturn(run func() settings.AibiDashboardEmbeddingAccessPolicyInterface) *MockSettingsInterface_AibiDashboardEmbeddingAccessPolicy_Call { + _c.Call.Return(run) + return _c +} + +// AibiDashboardEmbeddingApprovedDomains provides a mock function with given fields: +func (_m *MockSettingsInterface) AibiDashboardEmbeddingApprovedDomains() settings.AibiDashboardEmbeddingApprovedDomainsInterface { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for AibiDashboardEmbeddingApprovedDomains") + } + + var r0 settings.AibiDashboardEmbeddingApprovedDomainsInterface + if rf, ok := ret.Get(0).(func() settings.AibiDashboardEmbeddingApprovedDomainsInterface); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(settings.AibiDashboardEmbeddingApprovedDomainsInterface) + } + } + + return r0 +} + +// MockSettingsInterface_AibiDashboardEmbeddingApprovedDomains_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AibiDashboardEmbeddingApprovedDomains' +type MockSettingsInterface_AibiDashboardEmbeddingApprovedDomains_Call struct { + *mock.Call +} + +// AibiDashboardEmbeddingApprovedDomains is a helper method to define mock.On call +func (_e *MockSettingsInterface_Expecter) AibiDashboardEmbeddingApprovedDomains() *MockSettingsInterface_AibiDashboardEmbeddingApprovedDomains_Call { + return &MockSettingsInterface_AibiDashboardEmbeddingApprovedDomains_Call{Call: _e.mock.On("AibiDashboardEmbeddingApprovedDomains")} +} + +func (_c *MockSettingsInterface_AibiDashboardEmbeddingApprovedDomains_Call) Run(run func()) *MockSettingsInterface_AibiDashboardEmbeddingApprovedDomains_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockSettingsInterface_AibiDashboardEmbeddingApprovedDomains_Call) Return(_a0 settings.AibiDashboardEmbeddingApprovedDomainsInterface) *MockSettingsInterface_AibiDashboardEmbeddingApprovedDomains_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockSettingsInterface_AibiDashboardEmbeddingApprovedDomains_Call) RunAndReturn(run func() settings.AibiDashboardEmbeddingApprovedDomainsInterface) *MockSettingsInterface_AibiDashboardEmbeddingApprovedDomains_Call { + _c.Call.Return(run) + return _c +} + // AutomaticClusterUpdate provides a mock function with given fields: func (_m *MockSettingsInterface) AutomaticClusterUpdate() settings.AutomaticClusterUpdateInterface { ret := _m.Called() diff --git a/service/apps/impl.go b/service/apps/impl.go index fe8d2d6c2..85cd53555 100755 --- a/service/apps/impl.go +++ b/service/apps/impl.go @@ -21,7 +21,7 @@ func (a *appsImpl) Create(ctx context.Context, request CreateAppRequest) (*App, headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &app) + err := a.client.Do(ctx, http.MethodPost, path, headers, request.App, &app) return &app, err } @@ -40,7 +40,7 @@ func (a *appsImpl) Deploy(ctx context.Context, request CreateAppDeploymentReques headers := make(map[string]string) headers["Accept"] = "application/json" 
headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &appDeployment) + err := a.client.Do(ctx, http.MethodPost, path, headers, request.AppDeployment, &appDeployment) return &appDeployment, err } @@ -134,7 +134,7 @@ func (a *appsImpl) Update(ctx context.Context, request UpdateAppRequest) (*App, headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &app) + err := a.client.Do(ctx, http.MethodPatch, path, headers, request.App, &app) return &app, err } diff --git a/service/apps/model.go b/service/apps/model.go index 030131ea2..385807883 100755 --- a/service/apps/model.go +++ b/service/apps/model.go @@ -593,51 +593,16 @@ func (s ComputeStatus) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Create an app deployment type CreateAppDeploymentRequest struct { + AppDeployment *AppDeployment `json:"app_deployment,omitempty"` // The name of the app. AppName string `json:"-" url:"-"` - // The unique id of the deployment. - DeploymentId string `json:"deployment_id,omitempty"` - // The mode of which the deployment will manage the source code. - Mode AppDeploymentMode `json:"mode,omitempty"` - // The workspace file system path of the source code used to create the app - // deployment. This is different from - // `deployment_artifacts.source_code_path`, which is the path used by the - // deployed app. The former refers to the original source code location of - // the app in the workspace during deployment creation, whereas the latter - // provides a system generated stable snapshotted source code path used by - // the deployment. - SourceCodePath string `json:"source_code_path,omitempty"` - - ForceSendFields []string `json:"-"` -} - -func (s *CreateAppDeploymentRequest) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s CreateAppDeploymentRequest) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) } +// Create an app type CreateAppRequest struct { - // The description of the app. - Description string `json:"description,omitempty"` - // The name of the app. The name must contain only lowercase alphanumeric - // characters and hyphens. It must be unique within the workspace. - Name string `json:"name"` - // Resources for the app. - Resources []AppResource `json:"resources,omitempty"` - - ForceSendFields []string `json:"-"` -} - -func (s *CreateAppRequest) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s CreateAppRequest) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) + App *App `json:"app,omitempty"` } // Delete an app @@ -760,22 +725,9 @@ type StopAppRequest struct { Name string `json:"-" url:"-"` } +// Update an app type UpdateAppRequest struct { - // The description of the app. - Description string `json:"description,omitempty"` - // The name of the app. The name must contain only lowercase alphanumeric - // characters and hyphens. It must be unique within the workspace. - Name string `json:"name" url:"-"` - // Resources for the app. - Resources []AppResource `json:"resources,omitempty"` - - ForceSendFields []string `json:"-"` -} - -func (s *UpdateAppRequest) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s UpdateAppRequest) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) + App *App `json:"app,omitempty"` + // The name of the app. 
+ Name string `json:"-" url:"-"` } diff --git a/service/catalog/api.go b/service/catalog/api.go index ccb1068b3..3d7fc6a7e 100755 --- a/service/catalog/api.go +++ b/service/catalog/api.go @@ -6,9 +6,11 @@ package catalog import ( "context" "fmt" + "time" "github.com/databricks/databricks-sdk-go/client" "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/retries" "github.com/databricks/databricks-sdk-go/useragent" ) @@ -1785,10 +1787,22 @@ func (a *ModelVersionsAPI) ListByFullName(ctx context.Context, fullName string) type OnlineTablesInterface interface { + // WaitGetOnlineTableActive repeatedly calls [OnlineTablesAPI.Get] and waits to reach ACTIVE state + WaitGetOnlineTableActive(ctx context.Context, name string, + timeout time.Duration, callback func(*OnlineTable)) (*OnlineTable, error) + // Create an Online Table. // // Create a new Online Table. - Create(ctx context.Context, request CreateOnlineTableRequest) (*OnlineTable, error) + Create(ctx context.Context, createOnlineTableRequest CreateOnlineTableRequest) (*WaitGetOnlineTableActive[OnlineTable], error) + + // Calls [OnlineTablesAPIInterface.Create] and waits to reach ACTIVE state + // + // You can override the default timeout of 20 minutes by calling adding + // retries.Timeout[OnlineTable](60*time.Minute) functional option. + // + // Deprecated: use [OnlineTablesAPIInterface.Create].Get() or [OnlineTablesAPIInterface.WaitGetOnlineTableActive] + CreateAndWait(ctx context.Context, createOnlineTableRequest CreateOnlineTableRequest, options ...retries.Option[OnlineTable]) (*OnlineTable, error) // Delete an Online Table. // @@ -1829,6 +1843,106 @@ type OnlineTablesAPI struct { onlineTablesImpl } +// WaitGetOnlineTableActive repeatedly calls [OnlineTablesAPI.Get] and waits to reach ACTIVE state +func (a *OnlineTablesAPI) WaitGetOnlineTableActive(ctx context.Context, name string, + timeout time.Duration, callback func(*OnlineTable)) (*OnlineTable, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "long-running") + return retries.Poll[OnlineTable](ctx, timeout, func() (*OnlineTable, *retries.Err) { + onlineTable, err := a.Get(ctx, GetOnlineTableRequest{ + Name: name, + }) + if err != nil { + return nil, retries.Halt(err) + } + if callback != nil { + callback(onlineTable) + } + status := onlineTable.UnityCatalogProvisioningState + statusMessage := fmt.Sprintf("current status: %s", status) + switch status { + case ProvisioningInfoStateActive: // target state + return onlineTable, nil + case ProvisioningInfoStateFailed: + err := fmt.Errorf("failed to reach %s, got %s: %s", + ProvisioningInfoStateActive, status, statusMessage) + return nil, retries.Halt(err) + default: + return nil, retries.Continues(statusMessage) + } + }) +} + +// WaitGetOnlineTableActive is a wrapper that calls [OnlineTablesAPI.WaitGetOnlineTableActive] and waits to reach ACTIVE state. +type WaitGetOnlineTableActive[R any] struct { + Response *R + Name string `json:"name"` + Poll func(time.Duration, func(*OnlineTable)) (*OnlineTable, error) + callback func(*OnlineTable) + timeout time.Duration +} + +// OnProgress invokes a callback every time it polls for the status update. +func (w *WaitGetOnlineTableActive[R]) OnProgress(callback func(*OnlineTable)) *WaitGetOnlineTableActive[R] { + w.callback = callback + return w +} + +// Get the OnlineTable with the default timeout of 20 minutes. 
+func (w *WaitGetOnlineTableActive[R]) Get() (*OnlineTable, error) { + return w.Poll(w.timeout, w.callback) +} + +// Get the OnlineTable with custom timeout. +func (w *WaitGetOnlineTableActive[R]) GetWithTimeout(timeout time.Duration) (*OnlineTable, error) { + return w.Poll(timeout, w.callback) +} + +// Create an Online Table. +// +// Create a new Online Table. +func (a *OnlineTablesAPI) Create(ctx context.Context, createOnlineTableRequest CreateOnlineTableRequest) (*WaitGetOnlineTableActive[OnlineTable], error) { + onlineTable, err := a.onlineTablesImpl.Create(ctx, createOnlineTableRequest) + if err != nil { + return nil, err + } + return &WaitGetOnlineTableActive[OnlineTable]{ + Response: onlineTable, + Name: onlineTable.Name, + Poll: func(timeout time.Duration, callback func(*OnlineTable)) (*OnlineTable, error) { + return a.WaitGetOnlineTableActive(ctx, onlineTable.Name, timeout, callback) + }, + timeout: 20 * time.Minute, + callback: nil, + }, nil +} + +// Calls [OnlineTablesAPI.Create] and waits to reach ACTIVE state +// +// You can override the default timeout of 20 minutes by calling adding +// retries.Timeout[OnlineTable](60*time.Minute) functional option. +// +// Deprecated: use [OnlineTablesAPI.Create].Get() or [OnlineTablesAPI.WaitGetOnlineTableActive] +func (a *OnlineTablesAPI) CreateAndWait(ctx context.Context, createOnlineTableRequest CreateOnlineTableRequest, options ...retries.Option[OnlineTable]) (*OnlineTable, error) { + wait, err := a.Create(ctx, createOnlineTableRequest) + if err != nil { + return nil, err + } + tmp := &retries.Info[OnlineTable]{Timeout: 20 * time.Minute} + for _, o := range options { + o(tmp) + } + wait.timeout = tmp.Timeout + wait.callback = func(info *OnlineTable) { + for _, o := range options { + o(&retries.Info[OnlineTable]{ + Info: info, + Timeout: wait.timeout, + }) + } + } + return wait.Get() +} + // Delete an Online Table. // // Delete an online table. Warning: This will delete all the data in the online diff --git a/service/catalog/impl.go b/service/catalog/impl.go index 4055d4f12..6f06d77c1 100755 --- a/service/catalog/impl.go +++ b/service/catalog/impl.go @@ -591,7 +591,7 @@ func (a *onlineTablesImpl) Create(ctx context.Context, request CreateOnlineTable headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &onlineTable) + err := a.client.Do(ctx, http.MethodPost, path, headers, request.Table, &onlineTable) return &onlineTable, err } diff --git a/service/catalog/model.go b/service/catalog/model.go index 279e985b5..4c48f561a 100755 --- a/service/catalog/model.go +++ b/service/catalog/model.go @@ -1096,22 +1096,10 @@ func (s CreateMonitor) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Online Table information. +// Create an Online Table type CreateOnlineTableRequest struct { - // Full three-part (catalog, schema, table) name of the table. - Name string `json:"name,omitempty"` - // Specification of the online table. - Spec *OnlineTableSpec `json:"spec,omitempty"` - - ForceSendFields []string `json:"-"` -} - -func (s *CreateOnlineTableRequest) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s CreateOnlineTableRequest) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) + // Online Table information. 
+ Table *OnlineTable `json:"table,omitempty"` } type CreateRegisteredModelRequest struct { diff --git a/service/dashboards/api.go b/service/dashboards/api.go index 8107847f4..1ec369728 100755 --- a/service/dashboards/api.go +++ b/service/dashboards/api.go @@ -37,7 +37,7 @@ type GenieInterface interface { // Execute SQL query in a conversation message. // // Execute the SQL query in the message. - ExecuteMessageQuery(ctx context.Context, request ExecuteMessageQueryRequest) (*GenieGetMessageQueryResultResponse, error) + ExecuteMessageQuery(ctx context.Context, request GenieExecuteMessageQueryRequest) (*GenieGetMessageQueryResultResponse, error) // Get conversation message. // diff --git a/service/dashboards/impl.go b/service/dashboards/impl.go index d5a724822..babb7fc7c 100755 --- a/service/dashboards/impl.go +++ b/service/dashboards/impl.go @@ -25,7 +25,7 @@ func (a *genieImpl) CreateMessage(ctx context.Context, request GenieCreateConver return &genieMessage, err } -func (a *genieImpl) ExecuteMessageQuery(ctx context.Context, request ExecuteMessageQueryRequest) (*GenieGetMessageQueryResultResponse, error) { +func (a *genieImpl) ExecuteMessageQuery(ctx context.Context, request GenieExecuteMessageQueryRequest) (*GenieGetMessageQueryResultResponse, error) { var genieGetMessageQueryResultResponse GenieGetMessageQueryResultResponse path := fmt.Sprintf("/api/2.0/genie/spaces/%v/conversations/%v/messages/%v/execute-query", request.SpaceId, request.ConversationId, request.MessageId) headers := make(map[string]string) @@ -73,7 +73,7 @@ func (a *lakeviewImpl) Create(ctx context.Context, request CreateDashboardReques headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &dashboard) + err := a.client.Do(ctx, http.MethodPost, path, headers, request.Dashboard, &dashboard) return &dashboard, err } @@ -83,7 +83,7 @@ func (a *lakeviewImpl) CreateSchedule(ctx context.Context, request CreateSchedul headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &schedule) + err := a.client.Do(ctx, http.MethodPost, path, headers, request.Schedule, &schedule) return &schedule, err } @@ -93,7 +93,7 @@ func (a *lakeviewImpl) CreateSubscription(ctx context.Context, request CreateSub headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &subscription) + err := a.client.Do(ctx, http.MethodPost, path, headers, request.Subscription, &subscription) return &subscription, err } @@ -222,7 +222,7 @@ func (a *lakeviewImpl) Update(ctx context.Context, request UpdateDashboardReques headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &dashboard) + err := a.client.Do(ctx, http.MethodPatch, path, headers, request.Dashboard, &dashboard) return &dashboard, err } @@ -232,6 +232,6 @@ func (a *lakeviewImpl) UpdateSchedule(ctx context.Context, request UpdateSchedul headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &schedule) + err := a.client.Do(ctx, http.MethodPut, path, headers, 
request.Schedule, &schedule) return &schedule, err } diff --git a/service/dashboards/interface.go b/service/dashboards/interface.go index deb1767c9..76c728fc8 100755 --- a/service/dashboards/interface.go +++ b/service/dashboards/interface.go @@ -23,7 +23,7 @@ type GenieService interface { // Execute SQL query in a conversation message. // // Execute the SQL query in the message. - ExecuteMessageQuery(ctx context.Context, request ExecuteMessageQueryRequest) (*GenieGetMessageQueryResultResponse, error) + ExecuteMessageQuery(ctx context.Context, request GenieExecuteMessageQueryRequest) (*GenieGetMessageQueryResultResponse, error) // Get conversation message. // diff --git a/service/dashboards/model.go b/service/dashboards/model.go index 8cbc5ad73..3f91f958d 100755 --- a/service/dashboards/model.go +++ b/service/dashboards/model.go @@ -9,65 +9,27 @@ import ( "github.com/databricks/databricks-sdk-go/service/sql" ) +// Create dashboard type CreateDashboardRequest struct { - // The display name of the dashboard. - DisplayName string `json:"display_name"` - // The workspace path of the folder containing the dashboard. Includes - // leading slash and no trailing slash. This field is excluded in List - // Dashboards responses. - ParentPath string `json:"parent_path,omitempty"` - // The contents of the dashboard in serialized string form. This field is - // excluded in List Dashboards responses. Use the [get dashboard API] to - // retrieve an example response, which includes the `serialized_dashboard` - // field. This field provides the structure of the JSON string that - // represents the dashboard's layout and components. - // - // [get dashboard API]: https://docs.databricks.com/api/workspace/lakeview/get - SerializedDashboard string `json:"serialized_dashboard,omitempty"` - // The warehouse ID used to run the dashboard. - WarehouseId string `json:"warehouse_id,omitempty"` - - ForceSendFields []string `json:"-"` -} - -func (s *CreateDashboardRequest) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s CreateDashboardRequest) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) + Dashboard *Dashboard `json:"dashboard,omitempty"` } +// Create dashboard schedule type CreateScheduleRequest struct { - // The cron expression describing the frequency of the periodic refresh for - // this schedule. - CronSchedule CronSchedule `json:"cron_schedule"` // UUID identifying the dashboard to which the schedule belongs. DashboardId string `json:"-" url:"-"` - // The display name for schedule. - DisplayName string `json:"display_name,omitempty"` - // The status indicates whether this schedule is paused or not. - PauseStatus SchedulePauseStatus `json:"pause_status,omitempty"` - ForceSendFields []string `json:"-"` -} - -func (s *CreateScheduleRequest) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s CreateScheduleRequest) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) + Schedule *Schedule `json:"schedule,omitempty"` } +// Create schedule subscription type CreateSubscriptionRequest struct { // UUID identifying the dashboard to which the subscription belongs. DashboardId string `json:"-" url:"-"` // UUID identifying the schedule to which the subscription belongs. ScheduleId string `json:"-" url:"-"` - // Subscriber details for users and destinations to be added as subscribers - // to the schedule. 
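The Genie change is a request-type rename only: callers now build a `GenieExecuteMessageQueryRequest` (defined later in this diff) carrying the same three path parameters. A hedged sketch, assuming a workspace client `w` that exposes the Genie service and placeholder IDs; the `StatementResponse` field on the result is assumed from the existing model:

```go
package example

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
)

// executeGenieQuery re-runs the SQL behind a Genie conversation message.
// The space, conversation, and message IDs are placeholders.
func executeGenieQuery(ctx context.Context, w *databricks.WorkspaceClient) error {
	result, err := w.Genie.ExecuteMessageQuery(ctx, dashboards.GenieExecuteMessageQueryRequest{
		SpaceId:        "01ef0000000000000000000000000000",
		ConversationId: "conversation-id",
		MessageId:      "message-id",
	})
	if err != nil {
		return err
	}
	if result.StatementResponse != nil {
		fmt.Println("statement:", result.StatementResponse.StatementId)
	}
	return nil
}
```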
- Subscriber Subscriber `json:"subscriber"` + + Subscription *Subscription `json:"subscription,omitempty"` } type CronSchedule struct { @@ -204,16 +166,6 @@ func (s DeleteSubscriptionRequest) MarshalJSON() ([]byte, error) { type DeleteSubscriptionResponse struct { } -// Execute SQL query in a conversation message -type ExecuteMessageQueryRequest struct { - // Conversation ID - ConversationId string `json:"-" url:"-"` - // Message ID - MessageId string `json:"-" url:"-"` - // Genie space ID - SpaceId string `json:"-" url:"-"` -} - // Genie AI Response type GenieAttachment struct { Query *QueryAttachment `json:"query,omitempty"` @@ -255,6 +207,16 @@ type GenieCreateConversationMessageRequest struct { SpaceId string `json:"-" url:"-"` } +// Execute SQL query in a conversation message +type GenieExecuteMessageQueryRequest struct { + // Conversation ID + ConversationId string `json:"-" url:"-"` + // Message ID + MessageId string `json:"-" url:"-"` + // Genie space ID + SpaceId string `json:"-" url:"-"` +} + // Get conversation message type GenieGetConversationMessageRequest struct { // The ID associated with the target conversation. @@ -599,6 +561,8 @@ const MessageErrorTypeMessageDeletedWhileExecutingException MessageErrorType = ` const MessageErrorTypeMessageUpdatedWhileExecutingException MessageErrorType = `MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION` +const MessageErrorTypeNoDeploymentsAvailableToWorkspace MessageErrorType = `NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE` + const MessageErrorTypeNoQueryToVisualizeException MessageErrorType = `NO_QUERY_TO_VISUALIZE_EXCEPTION` const MessageErrorTypeNoTablesToQueryException MessageErrorType = `NO_TABLES_TO_QUERY_EXCEPTION` @@ -635,11 +599,11 @@ func (f *MessageErrorType) String() string { // Set raw string value and validate it against allowed values func (f *MessageErrorType) Set(v string) error { switch v { - case `BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION`, `CHAT_COMPLETION_CLIENT_EXCEPTION`, `CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION`, `CHAT_COMPLETION_NETWORK_EXCEPTION`, `CONTENT_FILTER_EXCEPTION`, `CONTEXT_EXCEEDED_EXCEPTION`, `COULD_NOT_GET_UC_SCHEMA_EXCEPTION`, `DEPLOYMENT_NOT_FOUND_EXCEPTION`, `FUNCTIONS_NOT_AVAILABLE_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION`, `FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION`, `GENERIC_CHAT_COMPLETION_EXCEPTION`, `GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION`, `GENERIC_SQL_EXEC_API_CALL_EXCEPTION`, `ILLEGAL_PARAMETER_DEFINITION_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION`, `INVALID_CHAT_COMPLETION_JSON_EXCEPTION`, `INVALID_COMPLETION_REQUEST_EXCEPTION`, `INVALID_FUNCTION_CALL_EXCEPTION`, `INVALID_TABLE_IDENTIFIER_EXCEPTION`, `LOCAL_CONTEXT_EXCEEDED_EXCEPTION`, `MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION`, `MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION`, `NO_QUERY_TO_VISUALIZE_EXCEPTION`, `NO_TABLES_TO_QUERY_EXCEPTION`, `RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION`, `RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION`, `REPLY_PROCESS_TIMEOUT_EXCEPTION`, `RETRYABLE_PROCESSING_EXCEPTION`, `SQL_EXECUTION_EXCEPTION`, `TABLES_MISSING_EXCEPTION`, `TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION`, `TOO_MANY_TABLES_EXCEPTION`, `UNEXPECTED_REPLY_PROCESS_EXCEPTION`, `UNKNOWN_AI_MODEL`, `WAREHOUSE_ACCESS_MISSING_EXCEPTION`, `WAREHOUSE_NOT_FOUND_EXCEPTION`: + case `BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION`, `CHAT_COMPLETION_CLIENT_EXCEPTION`, `CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION`, `CHAT_COMPLETION_NETWORK_EXCEPTION`, `CONTENT_FILTER_EXCEPTION`, 
`CONTEXT_EXCEEDED_EXCEPTION`, `COULD_NOT_GET_UC_SCHEMA_EXCEPTION`, `DEPLOYMENT_NOT_FOUND_EXCEPTION`, `FUNCTIONS_NOT_AVAILABLE_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION`, `FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION`, `GENERIC_CHAT_COMPLETION_EXCEPTION`, `GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION`, `GENERIC_SQL_EXEC_API_CALL_EXCEPTION`, `ILLEGAL_PARAMETER_DEFINITION_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION`, `INVALID_CHAT_COMPLETION_JSON_EXCEPTION`, `INVALID_COMPLETION_REQUEST_EXCEPTION`, `INVALID_FUNCTION_CALL_EXCEPTION`, `INVALID_TABLE_IDENTIFIER_EXCEPTION`, `LOCAL_CONTEXT_EXCEEDED_EXCEPTION`, `MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION`, `MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION`, `NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE`, `NO_QUERY_TO_VISUALIZE_EXCEPTION`, `NO_TABLES_TO_QUERY_EXCEPTION`, `RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION`, `RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION`, `REPLY_PROCESS_TIMEOUT_EXCEPTION`, `RETRYABLE_PROCESSING_EXCEPTION`, `SQL_EXECUTION_EXCEPTION`, `TABLES_MISSING_EXCEPTION`, `TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION`, `TOO_MANY_TABLES_EXCEPTION`, `UNEXPECTED_REPLY_PROCESS_EXCEPTION`, `UNKNOWN_AI_MODEL`, `WAREHOUSE_ACCESS_MISSING_EXCEPTION`, `WAREHOUSE_NOT_FOUND_EXCEPTION`: *f = MessageErrorType(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION", "CHAT_COMPLETION_CLIENT_EXCEPTION", "CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION", "CHAT_COMPLETION_NETWORK_EXCEPTION", "CONTENT_FILTER_EXCEPTION", "CONTEXT_EXCEEDED_EXCEPTION", "COULD_NOT_GET_UC_SCHEMA_EXCEPTION", "DEPLOYMENT_NOT_FOUND_EXCEPTION", "FUNCTIONS_NOT_AVAILABLE_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION", "FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION", "GENERIC_CHAT_COMPLETION_EXCEPTION", "GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION", "GENERIC_SQL_EXEC_API_CALL_EXCEPTION", "ILLEGAL_PARAMETER_DEFINITION_EXCEPTION", "INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION", "INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION", "INVALID_CHAT_COMPLETION_JSON_EXCEPTION", "INVALID_COMPLETION_REQUEST_EXCEPTION", "INVALID_FUNCTION_CALL_EXCEPTION", "INVALID_TABLE_IDENTIFIER_EXCEPTION", "LOCAL_CONTEXT_EXCEEDED_EXCEPTION", "MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION", "MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION", "NO_QUERY_TO_VISUALIZE_EXCEPTION", "NO_TABLES_TO_QUERY_EXCEPTION", "RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION", "RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION", "REPLY_PROCESS_TIMEOUT_EXCEPTION", "RETRYABLE_PROCESSING_EXCEPTION", "SQL_EXECUTION_EXCEPTION", "TABLES_MISSING_EXCEPTION", "TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION", "TOO_MANY_TABLES_EXCEPTION", "UNEXPECTED_REPLY_PROCESS_EXCEPTION", "UNKNOWN_AI_MODEL", "WAREHOUSE_ACCESS_MISSING_EXCEPTION", "WAREHOUSE_NOT_FOUND_EXCEPTION"`, v) + return fmt.Errorf(`value "%s" is not one of "BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION", "CHAT_COMPLETION_CLIENT_EXCEPTION", "CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION", "CHAT_COMPLETION_NETWORK_EXCEPTION", "CONTENT_FILTER_EXCEPTION", "CONTEXT_EXCEEDED_EXCEPTION", "COULD_NOT_GET_UC_SCHEMA_EXCEPTION", "DEPLOYMENT_NOT_FOUND_EXCEPTION", "FUNCTIONS_NOT_AVAILABLE_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION", "FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION", "GENERIC_CHAT_COMPLETION_EXCEPTION", "GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION", "GENERIC_SQL_EXEC_API_CALL_EXCEPTION", "ILLEGAL_PARAMETER_DEFINITION_EXCEPTION", 
"INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION", "INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION", "INVALID_CHAT_COMPLETION_JSON_EXCEPTION", "INVALID_COMPLETION_REQUEST_EXCEPTION", "INVALID_FUNCTION_CALL_EXCEPTION", "INVALID_TABLE_IDENTIFIER_EXCEPTION", "LOCAL_CONTEXT_EXCEEDED_EXCEPTION", "MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION", "MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION", "NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE", "NO_QUERY_TO_VISUALIZE_EXCEPTION", "NO_TABLES_TO_QUERY_EXCEPTION", "RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION", "RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION", "REPLY_PROCESS_TIMEOUT_EXCEPTION", "RETRYABLE_PROCESSING_EXCEPTION", "SQL_EXECUTION_EXCEPTION", "TABLES_MISSING_EXCEPTION", "TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION", "TOO_MANY_TABLES_EXCEPTION", "UNEXPECTED_REPLY_PROCESS_EXCEPTION", "UNKNOWN_AI_MODEL", "WAREHOUSE_ACCESS_MISSING_EXCEPTION", "WAREHOUSE_NOT_FOUND_EXCEPTION"`, v) } } @@ -980,61 +944,19 @@ type UnpublishDashboardRequest struct { type UnpublishDashboardResponse struct { } +// Update dashboard type UpdateDashboardRequest struct { + Dashboard *Dashboard `json:"dashboard,omitempty"` // UUID identifying the dashboard. DashboardId string `json:"-" url:"-"` - // The display name of the dashboard. - DisplayName string `json:"display_name,omitempty"` - // The etag for the dashboard. Can be optionally provided on updates to - // ensure that the dashboard has not been modified since the last read. This - // field is excluded in List Dashboards responses. - Etag string `json:"etag,omitempty"` - // The contents of the dashboard in serialized string form. This field is - // excluded in List Dashboards responses. Use the [get dashboard API] to - // retrieve an example response, which includes the `serialized_dashboard` - // field. This field provides the structure of the JSON string that - // represents the dashboard's layout and components. - // - // [get dashboard API]: https://docs.databricks.com/api/workspace/lakeview/get - SerializedDashboard string `json:"serialized_dashboard,omitempty"` - // The warehouse ID used to run the dashboard. - WarehouseId string `json:"warehouse_id,omitempty"` - - ForceSendFields []string `json:"-"` -} - -func (s *UpdateDashboardRequest) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s UpdateDashboardRequest) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) } +// Update dashboard schedule type UpdateScheduleRequest struct { - // The cron expression describing the frequency of the periodic refresh for - // this schedule. - CronSchedule CronSchedule `json:"cron_schedule"` // UUID identifying the dashboard to which the schedule belongs. DashboardId string `json:"-" url:"-"` - // The display name for schedule. - DisplayName string `json:"display_name,omitempty"` - // The etag for the schedule. Must be left empty on create, must be provided - // on updates to ensure that the schedule has not been modified since the - // last read, and can be optionally provided on delete. - Etag string `json:"etag,omitempty"` - // The status indicates whether this schedule is paused or not. - PauseStatus SchedulePauseStatus `json:"pause_status,omitempty"` + + Schedule *Schedule `json:"schedule,omitempty"` // UUID identifying the schedule. 
ScheduleId string `json:"-" url:"-"` - - ForceSendFields []string `json:"-"` -} - -func (s *UpdateScheduleRequest) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s UpdateScheduleRequest) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) } diff --git a/service/jobs/model.go b/service/jobs/model.go index de0be530e..c8e99f852 100755 --- a/service/jobs/model.go +++ b/service/jobs/model.go @@ -879,9 +879,8 @@ type GetRunRequest struct { IncludeHistory bool `json:"-" url:"include_history,omitempty"` // Whether to include resolved parameter values in the response. IncludeResolvedValues bool `json:"-" url:"include_resolved_values,omitempty"` - // To list the next page or the previous page of job tasks, set this field - // to the value of the `next_page_token` or `prev_page_token` returned in - // the GetJob response. + // To list the next page of job tasks, set this field to the value of the + // `next_page_token` returned in the GetJob response. PageToken string `json:"-" url:"page_token,omitempty"` // The canonical identifier of the run for which to retrieve the metadata. // This field is required. @@ -2187,8 +2186,10 @@ type RepairRun struct { // of this field (for example `{"jar_params":["john doe","35"]}`) cannot // exceed 10,000 bytes. // - // Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set - // parameters containing information about job runs. + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables JarParams []string `json:"jar_params,omitempty"` // Job-level parameters used in the run. for example `"param": // "overriding_val"` @@ -2458,8 +2459,6 @@ type Run struct { OriginalAttemptRunId int64 `json:"original_attempt_run_id,omitempty"` // The parameters used for this run. OverridingParameters *RunParameters `json:"overriding_parameters,omitempty"` - // A token that can be used to list the previous page of sub-resources. - PrevPageToken string `json:"prev_page_token,omitempty"` // The time in milliseconds that the run has spent in the queue. QueueDuration int64 `json:"queue_duration,omitempty"` // The repair history of the run. @@ -2667,8 +2666,10 @@ type RunJobTask struct { // of this field (for example `{"jar_params":["john doe","35"]}`) cannot // exceed 10,000 bytes. // - // Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set - // parameters containing information about job runs. + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables JarParams []string `json:"jar_params,omitempty"` // ID of the job to trigger. JobId int64 `json:"job_id"` @@ -2876,8 +2877,10 @@ type RunNow struct { // of this field (for example `{"jar_params":["john doe","35"]}`) cannot // exceed 10,000 bytes. // - // Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set - // parameters containing information about job runs. + // Use [Task parameter variables] to set parameters containing information + // about job runs. 
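With `prev_page_token` removed from `Run`, paging through a large run's tasks is forward-only: keep re-calling `GetRun` with the returned `next_page_token`. A small helper, assuming `Run.NextPageToken` remains present as in prior releases and a placeholder run ID:

```go
package example

import (
	"context"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/jobs"
)

// allRunTasks collects every task of a run, following next_page_token until
// the service stops returning one.
func allRunTasks(ctx context.Context, w *databricks.WorkspaceClient, runID int64) ([]jobs.RunTask, error) {
	var tasks []jobs.RunTask
	pageToken := ""
	for {
		run, err := w.Jobs.GetRun(ctx, jobs.GetRunRequest{
			RunId:     runID,
			PageToken: pageToken,
		})
		if err != nil {
			return nil, err
		}
		tasks = append(tasks, run.Tasks...)
		if run.NextPageToken == "" {
			return tasks, nil
		}
		pageToken = run.NextPageToken
	}
}
```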
+ // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables JarParams []string `json:"jar_params,omitempty"` // The ID of the job to be executed JobId int64 `json:"job_id"` @@ -3045,8 +3048,10 @@ type RunParameters struct { // of this field (for example `{"jar_params":["john doe","35"]}`) cannot // exceed 10,000 bytes. // - // Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set - // parameters containing information about job runs. + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables JarParams []string `json:"jar_params,omitempty"` // A map from keys to values for jobs with notebook task, for example // `"notebook_params": {"name": "john doe", "age": "35"}`. The map is passed @@ -3240,13 +3245,14 @@ type RunTask struct { // cluster, this field is set once the Jobs service has requested a cluster // for the run. ClusterInstance *ClusterInstance `json:"cluster_instance,omitempty"` - // If condition_task, specifies a condition with an outcome that can be used - // to control the execution of other tasks. Does not require a cluster to - // execute and does not support retries or notifications. + // The task evaluates a condition that can be used to control the execution + // of other tasks when the `condition_task` field is present. The condition + // task does not require a cluster to execute and does not support retries + // or notifications. ConditionTask *RunConditionTask `json:"condition_task,omitempty"` - // If dbt_task, indicates that this must execute a dbt task. It requires - // both Databricks SQL and the ability to use a serverless or a pro SQL - // warehouse. + // The task runs one or more dbt commands when the `dbt_task` field is + // present. The dbt task requires both Databricks SQL and the ability to use + // a serverless or a pro SQL warehouse. DbtTask *DbtTask `json:"dbt_task,omitempty"` // An optional array of objects specifying the dependency graph of the task. // All tasks specified in this field must complete successfully before @@ -3278,8 +3284,8 @@ type RunTask struct { // to manually restart the cluster if it stops responding. We suggest // running jobs and tasks on new clusters for greater reliability ExistingClusterId string `json:"existing_cluster_id,omitempty"` - // If for_each_task, indicates that this task must execute the nested task - // within it. + // The task executes a nested task for every input provided when the + // `for_each_task` field is present. ForEachTask *RunForEachTask `json:"for_each_task,omitempty"` // An optional specification for a remote Git repository containing the // source code used by tasks. Version-controlled source code is supported by @@ -3299,16 +3305,17 @@ type RunTask struct { // If new_cluster, a description of a new cluster that is created for each // run. NewCluster *compute.ClusterSpec `json:"new_cluster,omitempty"` - // If notebook_task, indicates that this task must run a notebook. This - // field may not be specified in conjunction with spark_jar_task. + // The task runs a notebook when the `notebook_task` field is present. NotebookTask *NotebookTask `json:"notebook_task,omitempty"` // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // task run. 
NotificationSettings *TaskNotificationSettings `json:"notification_settings,omitempty"` - // If pipeline_task, indicates that this task must execute a Pipeline. + // The task triggers a pipeline update when the `pipeline_task` field is + // present. Only pipelines configured to use triggered more are supported. PipelineTask *PipelineTask `json:"pipeline_task,omitempty"` - // If python_wheel_task, indicates that this job must execute a PythonWheel. + // The task runs a Python wheel when the `python_wheel_task` field is + // present. PythonWheelTask *PythonWheelTask `json:"python_wheel_task,omitempty"` // The time in milliseconds that the run has spent in the queue. QueueDuration int64 `json:"queue_duration,omitempty"` @@ -3324,7 +3331,7 @@ type RunTask struct { // omitted, defaults to `ALL_SUCCESS`. See :method:jobs/create for a list of // possible values. RunIf RunIf `json:"run_if,omitempty"` - // If run_job_task, indicates that this task must execute another job. + // The task triggers another job when the `run_job_task` field is present. RunJobTask *RunJobTask `json:"run_job_task,omitempty"` RunPageUrl string `json:"run_page_url,omitempty"` @@ -3336,12 +3343,14 @@ type RunTask struct { // job runs. The total duration of a multitask job run is the value of the // `run_duration` field. SetupDuration int64 `json:"setup_duration,omitempty"` - // If spark_jar_task, indicates that this task must run a JAR. + // The task runs a JAR when the `spark_jar_task` field is present. SparkJarTask *SparkJarTask `json:"spark_jar_task,omitempty"` - // If spark_python_task, indicates that this task must run a Python file. + // The task runs a Python file when the `spark_python_task` field is + // present. SparkPythonTask *SparkPythonTask `json:"spark_python_task,omitempty"` - // If `spark_submit_task`, indicates that this task must be launched by the - // spark submit script. This task can run only on new clusters. + // (Legacy) The task runs the spark-submit script when the + // `spark_submit_task` field is present. This task can run only on new + // clusters and is not compatible with serverless compute. // // In the `new_cluster` specification, `libraries` and `spark_conf` are not // supported. Instead, use `--jars` and `--py-files` to add Java and Python @@ -3358,7 +3367,8 @@ type RunTask struct { // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 // paths. SparkSubmitTask *SparkSubmitTask `json:"spark_submit_task,omitempty"` - // If sql_task, indicates that this job must execute a SQL task. + // The task runs a SQL query or file, or it refreshes a SQL alert or a + // legacy SQL dashboard when the `sql_task` field is present. SqlTask *SqlTask `json:"sql_task,omitempty"` // The time at which this run was started in epoch milliseconds // (milliseconds since 1/1/1970 UTC). This may not be the time when the job @@ -3928,13 +3938,14 @@ func (s SubmitRunResponse) MarshalJSON() ([]byte, error) { } type SubmitTask struct { - // If condition_task, specifies a condition with an outcome that can be used - // to control the execution of other tasks. Does not require a cluster to - // execute and does not support retries or notifications. + // The task evaluates a condition that can be used to control the execution + // of other tasks when the `condition_task` field is present. The condition + // task does not require a cluster to execute and does not support retries + // or notifications. 
ConditionTask *ConditionTask `json:"condition_task,omitempty"` - // If dbt_task, indicates that this must execute a dbt task. It requires - // both Databricks SQL and the ability to use a serverless or a pro SQL - // warehouse. + // The task runs one or more dbt commands when the `dbt_task` field is + // present. The dbt task requires both Databricks SQL and the ability to use + // a serverless or a pro SQL warehouse. DbtTask *DbtTask `json:"dbt_task,omitempty"` // An optional array of objects specifying the dependency graph of the task. // All tasks specified in this field must complete successfully before @@ -3955,8 +3966,8 @@ type SubmitTask struct { // to manually restart the cluster if it stops responding. We suggest // running jobs and tasks on new clusters for greater reliability ExistingClusterId string `json:"existing_cluster_id,omitempty"` - // If for_each_task, indicates that this task must execute the nested task - // within it. + // The task executes a nested task for every input provided when the + // `for_each_task` field is present. ForEachTask *ForEachTask `json:"for_each_task,omitempty"` // An optional set of health rules that can be defined for this job. Health *JobsHealthRules `json:"health,omitempty"` @@ -3966,30 +3977,33 @@ type SubmitTask struct { // If new_cluster, a description of a new cluster that is created for each // run. NewCluster *compute.ClusterSpec `json:"new_cluster,omitempty"` - // If notebook_task, indicates that this task must run a notebook. This - // field may not be specified in conjunction with spark_jar_task. + // The task runs a notebook when the `notebook_task` field is present. NotebookTask *NotebookTask `json:"notebook_task,omitempty"` // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // task run. NotificationSettings *TaskNotificationSettings `json:"notification_settings,omitempty"` - // If pipeline_task, indicates that this task must execute a Pipeline. + // The task triggers a pipeline update when the `pipeline_task` field is + // present. Only pipelines configured to use triggered more are supported. PipelineTask *PipelineTask `json:"pipeline_task,omitempty"` - // If python_wheel_task, indicates that this job must execute a PythonWheel. + // The task runs a Python wheel when the `python_wheel_task` field is + // present. PythonWheelTask *PythonWheelTask `json:"python_wheel_task,omitempty"` // An optional value indicating the condition that determines whether the // task should be run once its dependencies have been completed. When // omitted, defaults to `ALL_SUCCESS`. See :method:jobs/create for a list of // possible values. RunIf RunIf `json:"run_if,omitempty"` - // If run_job_task, indicates that this task must execute another job. + // The task triggers another job when the `run_job_task` field is present. RunJobTask *RunJobTask `json:"run_job_task,omitempty"` - // If spark_jar_task, indicates that this task must run a JAR. + // The task runs a JAR when the `spark_jar_task` field is present. SparkJarTask *SparkJarTask `json:"spark_jar_task,omitempty"` - // If spark_python_task, indicates that this task must run a Python file. + // The task runs a Python file when the `spark_python_task` field is + // present. SparkPythonTask *SparkPythonTask `json:"spark_python_task,omitempty"` - // If `spark_submit_task`, indicates that this task must be launched by the - // spark submit script. This task can run only on new clusters. 
+ // (Legacy) The task runs the spark-submit script when the + // `spark_submit_task` field is present. This task can run only on new + // clusters and is not compatible with serverless compute. // // In the `new_cluster` specification, `libraries` and `spark_conf` are not // supported. Instead, use `--jars` and `--py-files` to add Java and Python @@ -4006,7 +4020,8 @@ type SubmitTask struct { // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 // paths. SparkSubmitTask *SparkSubmitTask `json:"spark_submit_task,omitempty"` - // If sql_task, indicates that this job must execute a SQL task. + // The task runs a SQL query or file, or it refreshes a SQL alert or a + // legacy SQL dashboard when the `sql_task` field is present. SqlTask *SqlTask `json:"sql_task,omitempty"` // A unique name for the task. This field is used to refer to this task from // other tasks. This field is required and must be unique within its parent @@ -4060,13 +4075,14 @@ func (s TableUpdateTriggerConfiguration) MarshalJSON() ([]byte, error) { } type Task struct { - // If condition_task, specifies a condition with an outcome that can be used - // to control the execution of other tasks. Does not require a cluster to - // execute and does not support retries or notifications. + // The task evaluates a condition that can be used to control the execution + // of other tasks when the `condition_task` field is present. The condition + // task does not require a cluster to execute and does not support retries + // or notifications. ConditionTask *ConditionTask `json:"condition_task,omitempty"` - // If dbt_task, indicates that this must execute a dbt task. It requires - // both Databricks SQL and the ability to use a serverless or a pro SQL - // warehouse. + // The task runs one or more dbt commands when the `dbt_task` field is + // present. The dbt task requires both Databricks SQL and the ability to use + // a serverless or a pro SQL warehouse. DbtTask *DbtTask `json:"dbt_task,omitempty"` // An optional array of objects specifying the dependency graph of the task. // All tasks specified in this field must complete before executing this @@ -4090,8 +4106,8 @@ type Task struct { // to manually restart the cluster if it stops responding. We suggest // running jobs and tasks on new clusters for greater reliability ExistingClusterId string `json:"existing_cluster_id,omitempty"` - // If for_each_task, indicates that this task must execute the nested task - // within it. + // The task executes a nested task for every input provided when the + // `for_each_task` field is present. ForEachTask *ForEachTask `json:"for_each_task,omitempty"` // An optional set of health rules that can be defined for this job. Health *JobsHealthRules `json:"health,omitempty"` @@ -4113,16 +4129,17 @@ type Task struct { // If new_cluster, a description of a new cluster that is created for each // run. NewCluster *compute.ClusterSpec `json:"new_cluster,omitempty"` - // If notebook_task, indicates that this task must run a notebook. This - // field may not be specified in conjunction with spark_jar_task. + // The task runs a notebook when the `notebook_task` field is present. NotebookTask *NotebookTask `json:"notebook_task,omitempty"` // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // task. 
NotificationSettings *TaskNotificationSettings `json:"notification_settings,omitempty"` - // If pipeline_task, indicates that this task must execute a Pipeline. + // The task triggers a pipeline update when the `pipeline_task` field is + // present. Only pipelines configured to use triggered more are supported. PipelineTask *PipelineTask `json:"pipeline_task,omitempty"` - // If python_wheel_task, indicates that this job must execute a PythonWheel. + // The task runs a Python wheel when the `python_wheel_task` field is + // present. PythonWheelTask *PythonWheelTask `json:"python_wheel_task,omitempty"` // An optional policy to specify whether to retry a job when it times out. // The default behavior is to not retry on timeout. @@ -4137,14 +4154,16 @@ type Task struct { // `AT_LEAST_ONE_FAILED`: At least one dependency failed * `ALL_FAILED`: ALl // dependencies have failed RunIf RunIf `json:"run_if,omitempty"` - // If run_job_task, indicates that this task must execute another job. + // The task triggers another job when the `run_job_task` field is present. RunJobTask *RunJobTask `json:"run_job_task,omitempty"` - // If spark_jar_task, indicates that this task must run a JAR. + // The task runs a JAR when the `spark_jar_task` field is present. SparkJarTask *SparkJarTask `json:"spark_jar_task,omitempty"` - // If spark_python_task, indicates that this task must run a Python file. + // The task runs a Python file when the `spark_python_task` field is + // present. SparkPythonTask *SparkPythonTask `json:"spark_python_task,omitempty"` - // If `spark_submit_task`, indicates that this task must be launched by the - // spark submit script. This task can run only on new clusters. + // (Legacy) The task runs the spark-submit script when the + // `spark_submit_task` field is present. This task can run only on new + // clusters and is not compatible with serverless compute. // // In the `new_cluster` specification, `libraries` and `spark_conf` are not // supported. Instead, use `--jars` and `--py-files` to add Java and Python @@ -4161,7 +4180,8 @@ type Task struct { // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 // paths. SparkSubmitTask *SparkSubmitTask `json:"spark_submit_task,omitempty"` - // If sql_task, indicates that this job must execute a SQL task. + // The task runs a SQL query or file, or it refreshes a SQL alert or a + // legacy SQL dashboard when the `sql_task` field is present. SqlTask *SqlTask `json:"sql_task,omitempty"` // A unique name for the task. This field is used to refer to this task from // other tasks. 
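The reworded task doc comments all follow the same rule: a task's type is determined by which single `*_task` field is present on it. As an illustration of that convention (not part of this diff), here is a hedged one-off submit with a condition task gating a notebook task; the cluster ID, notebook path, and comparison operands are placeholders:

```go
package example

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/jobs"
)

func submitGatedNotebook(ctx context.Context, w *databricks.WorkspaceClient) error {
	wait, err := w.Jobs.Submit(ctx, jobs.SubmitRun{
		RunName: "gated-notebook-example",
		Tasks: []jobs.SubmitTask{
			{
				// Presence of condition_task makes this a condition task: no
				// cluster is needed and retries/notifications are unsupported.
				TaskKey: "check",
				ConditionTask: &jobs.ConditionTask{
					Op:    jobs.ConditionTaskOpEqualTo,
					Left:  "1", // placeholder operands
					Right: "1",
				},
			},
			{
				// Presence of notebook_task makes this a notebook task; it only
				// runs when the condition task's outcome is "true".
				TaskKey:           "etl",
				DependsOn:         []jobs.TaskDependency{{TaskKey: "check", Outcome: "true"}},
				ExistingClusterId: "1234-567890-abcde123",
				NotebookTask:      &jobs.NotebookTask{NotebookPath: "/Workspace/Users/someone@example.com/etl"},
			},
		},
	})
	if err != nil {
		return err
	}
	run, err := wait.Get()
	if err != nil {
		return err
	}
	fmt.Println("run finished:", run.State.ResultState)
	return nil
}
```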
This field is required and must be unique within its parent diff --git a/service/marketplace/model.go b/service/marketplace/model.go index 2096dc213..d96905e74 100755 --- a/service/marketplace/model.go +++ b/service/marketplace/model.go @@ -30,6 +30,8 @@ const AssetTypeAssetTypeModel AssetType = `ASSET_TYPE_MODEL` const AssetTypeAssetTypeNotebook AssetType = `ASSET_TYPE_NOTEBOOK` +const AssetTypeAssetTypePartnerIntegration AssetType = `ASSET_TYPE_PARTNER_INTEGRATION` + // String representation for [fmt.Print] func (f *AssetType) String() string { return string(*f) @@ -38,11 +40,11 @@ func (f *AssetType) String() string { // Set raw string value and validate it against allowed values func (f *AssetType) Set(v string) error { switch v { - case `ASSET_TYPE_DATA_TABLE`, `ASSET_TYPE_GIT_REPO`, `ASSET_TYPE_MEDIA`, `ASSET_TYPE_MODEL`, `ASSET_TYPE_NOTEBOOK`: + case `ASSET_TYPE_DATA_TABLE`, `ASSET_TYPE_GIT_REPO`, `ASSET_TYPE_MEDIA`, `ASSET_TYPE_MODEL`, `ASSET_TYPE_NOTEBOOK`, `ASSET_TYPE_PARTNER_INTEGRATION`: *f = AssetType(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "ASSET_TYPE_DATA_TABLE", "ASSET_TYPE_GIT_REPO", "ASSET_TYPE_MEDIA", "ASSET_TYPE_MODEL", "ASSET_TYPE_NOTEBOOK"`, v) + return fmt.Errorf(`value "%s" is not one of "ASSET_TYPE_DATA_TABLE", "ASSET_TYPE_GIT_REPO", "ASSET_TYPE_MEDIA", "ASSET_TYPE_MODEL", "ASSET_TYPE_NOTEBOOK", "ASSET_TYPE_PARTNER_INTEGRATION"`, v) } } diff --git a/service/oauth2/api.go b/service/oauth2/api.go index a13184d3d..bd5dea941 100755 --- a/service/oauth2/api.go +++ b/service/oauth2/api.go @@ -437,12 +437,18 @@ func (a *ServicePrincipalSecretsAPI) List(ctx context.Context, request ListServi getItems := func(resp *ListServicePrincipalSecretsResponse) []SecretInfo { return resp.Secrets } - + getNextReq := func(resp *ListServicePrincipalSecretsResponse) *ListServicePrincipalSecretsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } iterator := listing.NewIterator( &request, getNextPage, getItems, - nil) + getNextReq) return iterator } diff --git a/service/oauth2/interface.go b/service/oauth2/interface.go index 45a124880..016449037 100755 --- a/service/oauth2/interface.go +++ b/service/oauth2/interface.go @@ -129,6 +129,6 @@ type ServicePrincipalSecretsService interface { // operation only returns information about the secrets themselves and does // not include the secret values. // - // Use ListAll() to get all SecretInfo instances + // Use ListAll() to get all SecretInfo instances, which will iterate over every result page. List(ctx context.Context, request ListServicePrincipalSecretsRequest) (*ListServicePrincipalSecretsResponse, error) } diff --git a/service/oauth2/model.go b/service/oauth2/model.go index cf81758fa..a15cd2f89 100755 --- a/service/oauth2/model.go +++ b/service/oauth2/model.go @@ -336,13 +336,45 @@ func (s ListPublishedAppIntegrationsRequest) MarshalJSON() ([]byte, error) { // List service principal secrets type ListServicePrincipalSecretsRequest struct { + // An opaque page token which was the `next_page_token` in the response of + // the previous request to list the secrets for this service principal. + // Provide this token to retrieve the next page of secret entries. When + // providing a `page_token`, all other parameters provided to the request + // must match the previous request. To list all of the secrets for a service + // principal, it is necessary to continue requesting pages of entries until + // the response contains no `next_page_token`. 
Note that the number of + // entries returned must not be used to determine when the listing is + // complete. + PageToken string `json:"-" url:"page_token,omitempty"` // The service principal ID. ServicePrincipalId int64 `json:"-" url:"-"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListServicePrincipalSecretsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListServicePrincipalSecretsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) } type ListServicePrincipalSecretsResponse struct { + // A token, which can be sent as `page_token` to retrieve the next page. + NextPageToken string `json:"next_page_token,omitempty"` // List of the secrets Secrets []SecretInfo `json:"secrets,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListServicePrincipalSecretsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListServicePrincipalSecretsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) } type PublishedAppOutput struct { diff --git a/service/pkg.go b/service/pkg.go index 1103f2249..a811484b2 100644 --- a/service/pkg.go +++ b/service/pkg.go @@ -4,6 +4,10 @@ // // - [iam.AccountAccessControlProxyAPI]: These APIs manage access rules on resources in an account. // +// - [settings.AibiDashboardEmbeddingAccessPolicyAPI]: Controls whether AI/BI published dashboard embedding is enabled, conditionally enabled, or disabled at the workspace level. +// +// - [settings.AibiDashboardEmbeddingApprovedDomainsAPI]: Controls the list of domains approved to host the embedded AI/BI dashboards. +// // - [sql.AlertsAPI]: The alerts API can be used to perform CRUD operations on alerts. // // - [sql.AlertsLegacyAPI]: The alerts API can be used to perform CRUD operations on alerts. @@ -286,6 +290,8 @@ import ( var ( _ *iam.AccountAccessControlAPI = nil _ *iam.AccountAccessControlProxyAPI = nil + _ *settings.AibiDashboardEmbeddingAccessPolicyAPI = nil + _ *settings.AibiDashboardEmbeddingApprovedDomainsAPI = nil _ *sql.AlertsAPI = nil _ *sql.AlertsLegacyAPI = nil _ *apps.AppsAPI = nil diff --git a/service/provisioning/model.go b/service/provisioning/model.go index 26167aa3c..ca8e3959a 100755 --- a/service/provisioning/model.go +++ b/service/provisioning/model.go @@ -253,6 +253,8 @@ type CreateWorkspaceRequest struct { GcpManagedNetworkConfig *GcpManagedNetworkConfig `json:"gcp_managed_network_config,omitempty"` // The configurations for the GKE cluster of a Databricks workspace. GkeConfig *GkeConfig `json:"gke_config,omitempty"` + // Whether no public IP is enabled for the workspace. + IsNoPublicIpEnabled bool `json:"is_no_public_ip_enabled,omitempty"` // The Google Cloud region of the workspace data plane in your Google // account. For example, `us-east4`. Location string `json:"location,omitempty"` @@ -484,6 +486,25 @@ func (f *ErrorType) Type() string { return "ErrorType" } +type ExternalCustomerInfo struct { + // Email of the authoritative user. + AuthoritativeUserEmail string `json:"authoritative_user_email,omitempty"` + // The authoritative user full name. 
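Because `List` now returns `next_page_token` and the iterator is wired up through `getNextReq`, callers of `ListAll` (or the iterator) transparently fetch every page; only callers issuing raw `List` requests need to manage `page_token` themselves. A minimal account-level sketch, assuming an account client authenticated from the environment and a placeholder service principal ID:

```go
package example

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/oauth2"
)

func listAllSecrets(ctx context.Context, servicePrincipalID int64) error {
	a, err := databricks.NewAccountClient()
	if err != nil {
		return err
	}
	// ListAll drives the paginated List endpoint, following next_page_token
	// until the response no longer includes one.
	secrets, err := a.ServicePrincipalSecrets.ListAll(ctx, oauth2.ListServicePrincipalSecretsRequest{
		ServicePrincipalId: servicePrincipalID,
	})
	if err != nil {
		return err
	}
	for _, s := range secrets {
		fmt.Println(s.Id, s.Status)
	}
	return nil
}
```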
+ AuthoritativeUserFullName string `json:"authoritative_user_full_name,omitempty"` + // The legal entity name for the external workspace + CustomerName string `json:"customer_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ExternalCustomerInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ExternalCustomerInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type GcpKeyInfo struct { // The GCP KMS key's resource name KmsKeyId string `json:"kms_key_id"` @@ -1220,6 +1241,10 @@ type Workspace struct { // This value must be unique across all non-deleted deployments across all // AWS regions. DeploymentName string `json:"deployment_name,omitempty"` + // If this workspace is for a external customer, then external_customer_info + // is populated. If this workspace is not for a external customer, then + // external_customer_info is empty. + ExternalCustomerInfo *ExternalCustomerInfo `json:"external_customer_info,omitempty"` // The network settings for the workspace. The configurations are only for // Databricks-managed VPCs. It is ignored if you specify a customer-managed // VPC in the `network_id` field.", All the IP range configurations must be @@ -1246,6 +1271,8 @@ type Workspace struct { GcpManagedNetworkConfig *GcpManagedNetworkConfig `json:"gcp_managed_network_config,omitempty"` // The configurations for the GKE cluster of a Databricks workspace. GkeConfig *GkeConfig `json:"gke_config,omitempty"` + // Whether no public IP is enabled for the workspace. + IsNoPublicIpEnabled bool `json:"is_no_public_ip_enabled,omitempty"` // The Google Cloud region of the workspace data plane in your Google // account (for example, `us-east4`). Location string `json:"location,omitempty"` diff --git a/service/settings/api.go b/service/settings/api.go index 96080a680..fba10af37 100755 --- a/service/settings/api.go +++ b/service/settings/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// These APIs allow you to manage Account Ip Access Lists, Account Settings, Automatic Cluster Update, Compliance Security Profile, Credentials Manager, Csp Enablement Account, Default Namespace, Disable Legacy Access, Disable Legacy Dbfs, Disable Legacy Features, Enhanced Security Monitoring, Esm Enablement Account, Ip Access Lists, Network Connectivity, Notification Destinations, Personal Compute, Restrict Workspace Admins, Settings, Token Management, Tokens, Workspace Conf, etc. +// These APIs allow you to manage Account Ip Access Lists, Account Settings, Aibi Dashboard Embedding Access Policy, Aibi Dashboard Embedding Approved Domains, Automatic Cluster Update, Compliance Security Profile, Credentials Manager, Csp Enablement Account, Default Namespace, Disable Legacy Access, Disable Legacy Dbfs, Disable Legacy Features, Enhanced Security Monitoring, Esm Enablement Account, Ip Access Lists, Network Connectivity, Notification Destinations, Personal Compute, Restrict Workspace Admins, Settings, Token Management, Tokens, Workspace Conf, etc. package settings import ( @@ -371,6 +371,66 @@ func (a *AccountSettingsAPI) PersonalCompute() PersonalComputeInterface { return a.personalCompute } +type AibiDashboardEmbeddingAccessPolicyInterface interface { + + // Retrieve the AI/BI dashboard embedding access policy. + // + // Retrieves the AI/BI dashboard embedding access policy. The default setting is + // ALLOW_APPROVED_DOMAINS, permitting AI/BI dashboards to be embedded on + // approved domains. 
+ Get(ctx context.Context, request GetAibiDashboardEmbeddingAccessPolicySettingRequest) (*AibiDashboardEmbeddingAccessPolicySetting, error) + + // Update the AI/BI dashboard embedding access policy. + // + // Updates the AI/BI dashboard embedding access policy at the workspace level. + Update(ctx context.Context, request UpdateAibiDashboardEmbeddingAccessPolicySettingRequest) (*AibiDashboardEmbeddingAccessPolicySetting, error) +} + +func NewAibiDashboardEmbeddingAccessPolicy(client *client.DatabricksClient) *AibiDashboardEmbeddingAccessPolicyAPI { + return &AibiDashboardEmbeddingAccessPolicyAPI{ + aibiDashboardEmbeddingAccessPolicyImpl: aibiDashboardEmbeddingAccessPolicyImpl{ + client: client, + }, + } +} + +// Controls whether AI/BI published dashboard embedding is enabled, +// conditionally enabled, or disabled at the workspace level. By default, this +// setting is conditionally enabled (ALLOW_APPROVED_DOMAINS). +type AibiDashboardEmbeddingAccessPolicyAPI struct { + aibiDashboardEmbeddingAccessPolicyImpl +} + +type AibiDashboardEmbeddingApprovedDomainsInterface interface { + + // Retrieve the list of domains approved to host embedded AI/BI dashboards. + // + // Retrieves the list of domains approved to host embedded AI/BI dashboards. + Get(ctx context.Context, request GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*AibiDashboardEmbeddingApprovedDomainsSetting, error) + + // Update the list of domains approved to host embedded AI/BI dashboards. + // + // Updates the list of domains approved to host embedded AI/BI dashboards. This + // update will fail if the current workspace access policy is not + // ALLOW_APPROVED_DOMAINS. + Update(ctx context.Context, request UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*AibiDashboardEmbeddingApprovedDomainsSetting, error) +} + +func NewAibiDashboardEmbeddingApprovedDomains(client *client.DatabricksClient) *AibiDashboardEmbeddingApprovedDomainsAPI { + return &AibiDashboardEmbeddingApprovedDomainsAPI{ + aibiDashboardEmbeddingApprovedDomainsImpl: aibiDashboardEmbeddingApprovedDomainsImpl{ + client: client, + }, + } +} + +// Controls the list of domains approved to host the embedded AI/BI dashboards. +// The approved domains list can't be mutated when the current access policy is +// not set to ALLOW_APPROVED_DOMAINS. +type AibiDashboardEmbeddingApprovedDomainsAPI struct { + aibiDashboardEmbeddingApprovedDomainsImpl +} + type AutomaticClusterUpdateInterface interface { // Get the automatic cluster update setting. @@ -1429,6 +1489,16 @@ type RestrictWorkspaceAdminsAPI struct { type SettingsInterface interface { + // Controls whether AI/BI published dashboard embedding is enabled, + // conditionally enabled, or disabled at the workspace level. By default, + // this setting is conditionally enabled (ALLOW_APPROVED_DOMAINS). + AibiDashboardEmbeddingAccessPolicy() AibiDashboardEmbeddingAccessPolicyInterface + + // Controls the list of domains approved to host the embedded AI/BI + // dashboards. The approved domains list can't be mutated when the current + // access policy is not set to ALLOW_APPROVED_DOMAINS. + AibiDashboardEmbeddingApprovedDomains() AibiDashboardEmbeddingApprovedDomainsInterface + // Controls whether automatic cluster update is enabled for the current // workspace. By default, it is turned off. 
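Both new settings are reachable from the workspace `Settings` facade wired up below. Reading the current embedding access policy is a single call; a sketch assuming a configured workspace client `w`:

```go
package example

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/settings"
)

func printEmbeddingPolicy(ctx context.Context, w *databricks.WorkspaceClient) error {
	setting, err := w.Settings.AibiDashboardEmbeddingAccessPolicy().Get(ctx,
		settings.GetAibiDashboardEmbeddingAccessPolicySettingRequest{})
	if err != nil {
		return err
	}
	// Defaults to ALLOW_APPROVED_DOMAINS when the setting has never been written.
	fmt.Println("access policy:", setting.AibiDashboardEmbeddingAccessPolicy.AccessPolicyType)
	return nil
}
```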
AutomaticClusterUpdate() AutomaticClusterUpdateInterface @@ -1500,6 +1570,10 @@ func NewSettings(client *client.DatabricksClient) *SettingsAPI { client: client, }, + aibiDashboardEmbeddingAccessPolicy: NewAibiDashboardEmbeddingAccessPolicy(client), + + aibiDashboardEmbeddingApprovedDomains: NewAibiDashboardEmbeddingApprovedDomains(client), + automaticClusterUpdate: NewAutomaticClusterUpdate(client), complianceSecurityProfile: NewComplianceSecurityProfile(client), @@ -1521,6 +1595,16 @@ func NewSettings(client *client.DatabricksClient) *SettingsAPI { type SettingsAPI struct { settingsImpl + // Controls whether AI/BI published dashboard embedding is enabled, + // conditionally enabled, or disabled at the workspace level. By default, + // this setting is conditionally enabled (ALLOW_APPROVED_DOMAINS). + aibiDashboardEmbeddingAccessPolicy AibiDashboardEmbeddingAccessPolicyInterface + + // Controls the list of domains approved to host the embedded AI/BI + // dashboards. The approved domains list can't be mutated when the current + // access policy is not set to ALLOW_APPROVED_DOMAINS. + aibiDashboardEmbeddingApprovedDomains AibiDashboardEmbeddingApprovedDomainsInterface + // Controls whether automatic cluster update is enabled for the current // workspace. By default, it is turned off. automaticClusterUpdate AutomaticClusterUpdateInterface @@ -1586,6 +1670,14 @@ type SettingsAPI struct { restrictWorkspaceAdmins RestrictWorkspaceAdminsInterface } +func (a *SettingsAPI) AibiDashboardEmbeddingAccessPolicy() AibiDashboardEmbeddingAccessPolicyInterface { + return a.aibiDashboardEmbeddingAccessPolicy +} + +func (a *SettingsAPI) AibiDashboardEmbeddingApprovedDomains() AibiDashboardEmbeddingApprovedDomainsInterface { + return a.aibiDashboardEmbeddingApprovedDomains +} + func (a *SettingsAPI) AutomaticClusterUpdate() AutomaticClusterUpdateInterface { return a.automaticClusterUpdate } diff --git a/service/settings/impl.go b/service/settings/impl.go index 36d93fc2b..f1db1ab3c 100755 --- a/service/settings/impl.go +++ b/service/settings/impl.go @@ -77,6 +77,54 @@ type accountSettingsImpl struct { client *client.DatabricksClient } +// unexported type that holds implementations of just AibiDashboardEmbeddingAccessPolicy API methods +type aibiDashboardEmbeddingAccessPolicyImpl struct { + client *client.DatabricksClient +} + +func (a *aibiDashboardEmbeddingAccessPolicyImpl) Get(ctx context.Context, request GetAibiDashboardEmbeddingAccessPolicySettingRequest) (*AibiDashboardEmbeddingAccessPolicySetting, error) { + var aibiDashboardEmbeddingAccessPolicySetting AibiDashboardEmbeddingAccessPolicySetting + path := "/api/2.0/settings/types/aibi_dash_embed_ws_acc_policy/names/default" + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, request, &aibiDashboardEmbeddingAccessPolicySetting) + return &aibiDashboardEmbeddingAccessPolicySetting, err +} + +func (a *aibiDashboardEmbeddingAccessPolicyImpl) Update(ctx context.Context, request UpdateAibiDashboardEmbeddingAccessPolicySettingRequest) (*AibiDashboardEmbeddingAccessPolicySetting, error) { + var aibiDashboardEmbeddingAccessPolicySetting AibiDashboardEmbeddingAccessPolicySetting + path := "/api/2.0/settings/types/aibi_dash_embed_ws_acc_policy/names/default" + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, request, 
&aibiDashboardEmbeddingAccessPolicySetting) + return &aibiDashboardEmbeddingAccessPolicySetting, err +} + +// unexported type that holds implementations of just AibiDashboardEmbeddingApprovedDomains API methods +type aibiDashboardEmbeddingApprovedDomainsImpl struct { + client *client.DatabricksClient +} + +func (a *aibiDashboardEmbeddingApprovedDomainsImpl) Get(ctx context.Context, request GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*AibiDashboardEmbeddingApprovedDomainsSetting, error) { + var aibiDashboardEmbeddingApprovedDomainsSetting AibiDashboardEmbeddingApprovedDomainsSetting + path := "/api/2.0/settings/types/aibi_dash_embed_ws_apprvd_domains/names/default" + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, request, &aibiDashboardEmbeddingApprovedDomainsSetting) + return &aibiDashboardEmbeddingApprovedDomainsSetting, err +} + +func (a *aibiDashboardEmbeddingApprovedDomainsImpl) Update(ctx context.Context, request UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*AibiDashboardEmbeddingApprovedDomainsSetting, error) { + var aibiDashboardEmbeddingApprovedDomainsSetting AibiDashboardEmbeddingApprovedDomainsSetting + path := "/api/2.0/settings/types/aibi_dash_embed_ws_apprvd_domains/names/default" + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &aibiDashboardEmbeddingApprovedDomainsSetting) + return &aibiDashboardEmbeddingApprovedDomainsSetting, err +} + // unexported type that holds implementations of just AutomaticClusterUpdate API methods type automaticClusterUpdateImpl struct { client *client.DatabricksClient diff --git a/service/settings/interface.go b/service/settings/interface.go index 8e8f0cd3d..5799ac700 100755 --- a/service/settings/interface.go +++ b/service/settings/interface.go @@ -107,6 +107,43 @@ type AccountIpAccessListsService interface { type AccountSettingsService interface { } +// Controls whether AI/BI published dashboard embedding is enabled, +// conditionally enabled, or disabled at the workspace level. By default, this +// setting is conditionally enabled (ALLOW_APPROVED_DOMAINS). +type AibiDashboardEmbeddingAccessPolicyService interface { + + // Retrieve the AI/BI dashboard embedding access policy. + // + // Retrieves the AI/BI dashboard embedding access policy. The default + // setting is ALLOW_APPROVED_DOMAINS, permitting AI/BI dashboards to be + // embedded on approved domains. + Get(ctx context.Context, request GetAibiDashboardEmbeddingAccessPolicySettingRequest) (*AibiDashboardEmbeddingAccessPolicySetting, error) + + // Update the AI/BI dashboard embedding access policy. + // + // Updates the AI/BI dashboard embedding access policy at the workspace + // level. + Update(ctx context.Context, request UpdateAibiDashboardEmbeddingAccessPolicySettingRequest) (*AibiDashboardEmbeddingAccessPolicySetting, error) +} + +// Controls the list of domains approved to host the embedded AI/BI dashboards. +// The approved domains list can't be mutated when the current access policy is +// not set to ALLOW_APPROVED_DOMAINS. +type AibiDashboardEmbeddingApprovedDomainsService interface { + + // Retrieve the list of domains approved to host embedded AI/BI dashboards. + // + // Retrieves the list of domains approved to host embedded AI/BI dashboards. 
+ Get(ctx context.Context, request GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*AibiDashboardEmbeddingApprovedDomainsSetting, error) + + // Update the list of domains approved to host embedded AI/BI dashboards. + // + // Updates the list of domains approved to host embedded AI/BI dashboards. + // This update will fail if the current workspace access policy is not + // ALLOW_APPROVED_DOMAINS. + Update(ctx context.Context, request UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*AibiDashboardEmbeddingApprovedDomainsSetting, error) +} + // Controls whether automatic cluster update is enabled for the current // workspace. By default, it is turned off. type AutomaticClusterUpdateService interface { diff --git a/service/settings/model.go b/service/settings/model.go index 457e3d471..01020544f 100755 --- a/service/settings/model.go +++ b/service/settings/model.go @@ -8,6 +8,99 @@ import ( "github.com/databricks/databricks-sdk-go/marshal" ) +type AibiDashboardEmbeddingAccessPolicy struct { + AccessPolicyType AibiDashboardEmbeddingAccessPolicyAccessPolicyType `json:"access_policy_type"` +} + +type AibiDashboardEmbeddingAccessPolicyAccessPolicyType string + +const AibiDashboardEmbeddingAccessPolicyAccessPolicyTypeAllowAllDomains AibiDashboardEmbeddingAccessPolicyAccessPolicyType = `ALLOW_ALL_DOMAINS` + +const AibiDashboardEmbeddingAccessPolicyAccessPolicyTypeAllowApprovedDomains AibiDashboardEmbeddingAccessPolicyAccessPolicyType = `ALLOW_APPROVED_DOMAINS` + +const AibiDashboardEmbeddingAccessPolicyAccessPolicyTypeDenyAllDomains AibiDashboardEmbeddingAccessPolicyAccessPolicyType = `DENY_ALL_DOMAINS` + +// String representation for [fmt.Print] +func (f *AibiDashboardEmbeddingAccessPolicyAccessPolicyType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AibiDashboardEmbeddingAccessPolicyAccessPolicyType) Set(v string) error { + switch v { + case `ALLOW_ALL_DOMAINS`, `ALLOW_APPROVED_DOMAINS`, `DENY_ALL_DOMAINS`: + *f = AibiDashboardEmbeddingAccessPolicyAccessPolicyType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ALLOW_ALL_DOMAINS", "ALLOW_APPROVED_DOMAINS", "DENY_ALL_DOMAINS"`, v) + } +} + +// Type always returns AibiDashboardEmbeddingAccessPolicyAccessPolicyType to satisfy [pflag.Value] interface +func (f *AibiDashboardEmbeddingAccessPolicyAccessPolicyType) Type() string { + return "AibiDashboardEmbeddingAccessPolicyAccessPolicyType" +} + +type AibiDashboardEmbeddingAccessPolicySetting struct { + AibiDashboardEmbeddingAccessPolicy AibiDashboardEmbeddingAccessPolicy `json:"aibi_dashboard_embedding_access_policy"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag string `json:"etag,omitempty"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. 
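Updating the setting follows the read -> update pattern described in the etag comments: fetch the current setting, copy its etag, and PATCH with `allow_missing` and a field mask. A hedged sketch; the exact field-mask string is an assumption, not taken from this diff:

```go
package example

import (
	"context"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/settings"
)

func allowAllDomains(ctx context.Context, w *databricks.WorkspaceClient) error {
	api := w.Settings.AibiDashboardEmbeddingAccessPolicy()

	// Read first to obtain the current etag for optimistic concurrency control.
	current, err := api.Get(ctx, settings.GetAibiDashboardEmbeddingAccessPolicySettingRequest{})
	if err != nil {
		return err
	}

	_, err = api.Update(ctx, settings.UpdateAibiDashboardEmbeddingAccessPolicySettingRequest{
		AllowMissing: true, // always true for the Settings API
		// Assumed field mask naming the field being changed.
		FieldMask: "aibi_dashboard_embedding_access_policy.access_policy_type",
		Setting: settings.AibiDashboardEmbeddingAccessPolicySetting{
			Etag: current.Etag,
			AibiDashboardEmbeddingAccessPolicy: settings.AibiDashboardEmbeddingAccessPolicy{
				AccessPolicyType: settings.AibiDashboardEmbeddingAccessPolicyAccessPolicyTypeAllowAllDomains,
			},
		},
	})
	return err
}
```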
+ // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName string `json:"setting_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AibiDashboardEmbeddingAccessPolicySetting) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AibiDashboardEmbeddingAccessPolicySetting) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AibiDashboardEmbeddingApprovedDomains struct { + ApprovedDomains []string `json:"approved_domains,omitempty"` +} + +type AibiDashboardEmbeddingApprovedDomainsSetting struct { + AibiDashboardEmbeddingApprovedDomains AibiDashboardEmbeddingApprovedDomains `json:"aibi_dashboard_embedding_approved_domains"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag string `json:"etag,omitempty"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName string `json:"setting_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AibiDashboardEmbeddingApprovedDomainsSetting) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AibiDashboardEmbeddingApprovedDomainsSetting) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type AutomaticClusterUpdateSetting struct { AutomaticClusterUpdateWorkspace ClusterAutoRestartMessage `json:"automatic_cluster_update_workspace"` // etag used for versioning. The response is at least as fresh as the eTag @@ -1099,6 +1192,50 @@ type GetAccountIpAccessListRequest struct { IpAccessListId string `json:"-" url:"-"` } +// Retrieve the AI/BI dashboard embedding access policy +type GetAibiDashboardEmbeddingAccessPolicySettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetAibiDashboardEmbeddingAccessPolicySettingRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetAibiDashboardEmbeddingAccessPolicySettingRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Retrieve the list of domains approved to host embedded AI/BI dashboards +type GetAibiDashboardEmbeddingApprovedDomainsSettingRequest struct { + // etag used for versioning. 
The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // Get the automatic cluster update setting type GetAutomaticClusterUpdateSettingRequest struct { // etag used for versioning. The response is at least as fresh as the eTag @@ -2200,6 +2337,9 @@ type TokenInfo struct { CreationTime int64 `json:"creation_time,omitempty"` // Timestamp when the token expires. ExpiryTime int64 `json:"expiry_time,omitempty"` + // Approximate timestamp for the day the token was last used. Accurate up to + // 1 day. + LastUsedDay int64 `json:"last_used_day,omitempty"` // User ID of the user that owns the token. OwnerId int64 `json:"owner_id,omitempty"` // ID of the token. @@ -2329,6 +2469,34 @@ func (f *TokenType) Type() string { return "TokenType" } +// Details required to update a setting. +type UpdateAibiDashboardEmbeddingAccessPolicySettingRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing bool `json:"allow_missing"` + // Field mask is required to be passed into the PATCH request. Field mask + // specifies which fields of the setting payload will be updated. The field + // mask needs to be supplied as single string. To specify multiple fields in + // the field mask, use comma as the separator (no space). + FieldMask string `json:"field_mask"` + + Setting AibiDashboardEmbeddingAccessPolicySetting `json:"setting"` +} + +// Details required to update a setting. +type UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing bool `json:"allow_missing"` + // Field mask is required to be passed into the PATCH request. Field mask + // specifies which fields of the setting payload will be updated. The field + // mask needs to be supplied as single string. To specify multiple fields in + // the field mask, use comma as the separator (no space). + FieldMask string `json:"field_mask"` + + Setting AibiDashboardEmbeddingApprovedDomainsSetting `json:"setting"` +} + // Details required to update a setting. type UpdateAutomaticClusterUpdateSettingRequest struct { // This should always be set to true for Settings API. Added for AIP diff --git a/service/sql/api.go b/service/sql/api.go index 66c9d2323..49c4846c7 100755 --- a/service/sql/api.go +++ b/service/sql/api.go @@ -1794,12 +1794,12 @@ func NewStatementExecution(client *client.DatabricksClient) *StatementExecutionA // request arrives. Polling for status until a terminal state is reached is a // reliable way to determine the final state. - Wait timeouts are approximate, // occur server-side, and cannot account for things such as caller delays and -// network latency from caller to service. 
- The system will auto-close a -// statement after one hour if the client stops polling and thus you must poll -// at least once an hour. - The results are only available for one hour after -// success; polling does not extend this. - The SQL Execution API must be used -// for the entire lifecycle of the statement. For example, you cannot use the -// Jobs API to execute the command, and then the SQL Execution API to cancel it. +// network latency from caller to service. - To guarantee that the statement is +// kept alive, you must poll at least once every 15 minutes. - The results are +// only available for one hour after success; polling does not extend this. - +// The SQL Execution API must be used for the entire lifecycle of the statement. +// For example, you cannot use the Jobs API to execute the command, and then the +// SQL Execution API to cancel it. // // [Apache Arrow Columnar]: https://arrow.apache.org/overview/ // [Databricks SQL Statement Execution API tutorial]: https://docs.databricks.com/sql/api/sql-execution-tutorial.html diff --git a/service/sql/interface.go b/service/sql/interface.go index 01105543f..985b2b724 100755 --- a/service/sql/interface.go +++ b/service/sql/interface.go @@ -575,12 +575,12 @@ type QueryVisualizationsLegacyService interface { // request arrives. Polling for status until a terminal state is reached is a // reliable way to determine the final state. - Wait timeouts are approximate, // occur server-side, and cannot account for things such as caller delays and -// network latency from caller to service. - The system will auto-close a -// statement after one hour if the client stops polling and thus you must poll -// at least once an hour. - The results are only available for one hour after -// success; polling does not extend this. - The SQL Execution API must be used -// for the entire lifecycle of the statement. For example, you cannot use the -// Jobs API to execute the command, and then the SQL Execution API to cancel it. +// network latency from caller to service. - To guarantee that the statement is +// kept alive, you must poll at least once every 15 minutes. - The results are +// only available for one hour after success; polling does not extend this. - +// The SQL Execution API must be used for the entire lifecycle of the statement. +// For example, you cannot use the Jobs API to execute the command, and then the +// SQL Execution API to cancel it. 
// // [Apache Arrow Columnar]: https://arrow.apache.org/overview/ // [Databricks SQL Statement Execution API tutorial]: https://docs.databricks.com/sql/api/sql-execution-tutorial.html diff --git a/service/sql/model.go b/service/sql/model.go index 95b9d38aa..bf307030c 100755 --- a/service/sql/model.go +++ b/service/sql/model.go @@ -400,7 +400,7 @@ const ChannelNameChannelNameCustom ChannelName = `CHANNEL_NAME_CUSTOM` const ChannelNameChannelNamePreview ChannelName = `CHANNEL_NAME_PREVIEW` -const ChannelNameChannelNameUnspecified ChannelName = `CHANNEL_NAME_UNSPECIFIED` +const ChannelNameChannelNamePrevious ChannelName = `CHANNEL_NAME_PREVIOUS` // String representation for [fmt.Print] func (f *ChannelName) String() string { @@ -410,11 +410,11 @@ func (f *ChannelName) String() string { // Set raw string value and validate it against allowed values func (f *ChannelName) Set(v string) error { switch v { - case `CHANNEL_NAME_CURRENT`, `CHANNEL_NAME_CUSTOM`, `CHANNEL_NAME_PREVIEW`, `CHANNEL_NAME_UNSPECIFIED`: + case `CHANNEL_NAME_CURRENT`, `CHANNEL_NAME_CUSTOM`, `CHANNEL_NAME_PREVIEW`, `CHANNEL_NAME_PREVIOUS`: *f = ChannelName(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "CHANNEL_NAME_CURRENT", "CHANNEL_NAME_CUSTOM", "CHANNEL_NAME_PREVIEW", "CHANNEL_NAME_UNSPECIFIED"`, v) + return fmt.Errorf(`value "%s" is not one of "CHANNEL_NAME_CURRENT", "CHANNEL_NAME_CUSTOM", "CHANNEL_NAME_PREVIEW", "CHANNEL_NAME_PREVIOUS"`, v) } } diff --git a/version/version.go b/version/version.go index 0de626d47..244a301de 100644 --- a/version/version.go +++ b/version/version.go @@ -1,4 +1,4 @@ package version // Version of the SDK, updated manually before every tag -const Version = "0.49.0" +const Version = "0.50.0" diff --git a/workspace_client.go b/workspace_client.go index a16249f6b..8aaa87f79 100755 --- a/workspace_client.go +++ b/workspace_client.go @@ -930,13 +930,13 @@ type WorkspaceClient struct { // arrives. Polling for status until a terminal state is reached is a // reliable way to determine the final state. - Wait timeouts are // approximate, occur server-side, and cannot account for things such as - // caller delays and network latency from caller to service. - The system - // will auto-close a statement after one hour if the client stops polling - // and thus you must poll at least once an hour. - The results are only - // available for one hour after success; polling does not extend this. - The - // SQL Execution API must be used for the entire lifecycle of the statement. - // For example, you cannot use the Jobs API to execute the command, and then - // the SQL Execution API to cancel it. + // caller delays and network latency from caller to service. - To guarantee + // that the statement is kept alive, you must poll at least once every 15 + // minutes. - The results are only available for one hour after success; + // polling does not extend this. - The SQL Execution API must be used for + // the entire lifecycle of the statement. For example, you cannot use the + // Jobs API to execute the command, and then the SQL Execution API to cancel + // it. // // [Apache Arrow Columnar]: https://arrow.apache.org/overview/ // [Databricks SQL Statement Execution API tutorial]: https://docs.databricks.com/sql/api/sql-execution-tutorial.html
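
Usage sketch (reviewer note): the new AibiDashboardEmbeddingAccessPolicy and AibiDashboardEmbeddingApprovedDomains services added above follow the same read -> update etag pattern as the other workspace settings services. The sketch below exercises only the interfaces and request/response types introduced in this change; the helper names, the package name, and the exact field-mask strings are illustrative assumptions, and wiring up a concrete implementation of the service interfaces (for example through the workspace client) is deliberately left out.

package aibiembeddingexample

import (
	"context"

	"github.com/databricks/databricks-sdk-go/service/settings"
)

// setEmbeddingPolicy is a hypothetical helper showing the read -> update flow
// from the etag doc comments: GET the current setting, then send its etag back
// with the PATCH so concurrent writers do not overwrite each other.
func setEmbeddingPolicy(
	ctx context.Context,
	svc settings.AibiDashboardEmbeddingAccessPolicyService,
	policy settings.AibiDashboardEmbeddingAccessPolicyAccessPolicyType,
) (*settings.AibiDashboardEmbeddingAccessPolicySetting, error) {
	current, err := svc.Get(ctx, settings.GetAibiDashboardEmbeddingAccessPolicySettingRequest{})
	if err != nil {
		return nil, err
	}
	return svc.Update(ctx, settings.UpdateAibiDashboardEmbeddingAccessPolicySettingRequest{
		// Always true for the Settings API, per the AllowMissing field docs.
		AllowMissing: true,
		// Comma-separated, no spaces; this particular mask value is an assumption.
		FieldMask: "aibi_dashboard_embedding_access_policy.access_policy_type",
		Setting: settings.AibiDashboardEmbeddingAccessPolicySetting{
			Etag: current.Etag,
			AibiDashboardEmbeddingAccessPolicy: settings.AibiDashboardEmbeddingAccessPolicy{
				AccessPolicyType: policy,
			},
		},
	})
}

// setApprovedDomains replaces the approved-domains list. Per the interface docs,
// the Update call fails unless the access policy is ALLOW_APPROVED_DOMAINS.
func setApprovedDomains(
	ctx context.Context,
	svc settings.AibiDashboardEmbeddingApprovedDomainsService,
	domains []string,
) (*settings.AibiDashboardEmbeddingApprovedDomainsSetting, error) {
	current, err := svc.Get(ctx, settings.GetAibiDashboardEmbeddingApprovedDomainsSettingRequest{})
	if err != nil {
		return nil, err
	}
	return svc.Update(ctx, settings.UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest{
		AllowMissing: true,
		FieldMask:    "aibi_dashboard_embedding_approved_domains.approved_domains",
		Setting: settings.AibiDashboardEmbeddingApprovedDomainsSetting{
			Etag: current.Etag,
			AibiDashboardEmbeddingApprovedDomains: settings.AibiDashboardEmbeddingApprovedDomains{
				ApprovedDomains: domains,
			},
		},
	})
}

The etag round trip is what the "optimistic concurrency control" comments in model.go describe: the server rejects a PATCH carrying a stale etag, so a caller that loses a race re-reads and retries instead of silently clobbering the other write.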