From 4622182d49c1ac146da10b274b1f7ee6437e3b99 Mon Sep 17 00:00:00 2001 From: Arinda Arif <79823430+arinda-arif@users.noreply.github.com> Date: Fri, 8 Oct 2021 16:10:05 +0700 Subject: [PATCH] feat: add backup feature (#88) User can configure backup-related configurations in the project config yaml, for example: datastore: path: bigquery type: bigquery backup: dataset: optimus_backup ttl: 30 prefix: backup another datastore can have different backup configurations model. Backup via cli usage: optimus backup resource --project [project] --namespace [namespace] Adding backup list to show recent backups of a project. optimus backup list --project [project] --- Makefile | 2 +- api/handler/v1/runtime.go | 87 +- api/handler/v1/runtime_test.go | 775 ++++++++- api/proto/odpf/optimus/runtime_service.pb.go | 1337 ++++++++++------ .../odpf/optimus/runtime_service.pb.gw.go | 304 +++- .../odpf/optimus/runtime_service_grpc.pb.go | 72 + .../odpf/optimus/runtime_service.swagger.json | 218 ++- cmd/backup.go | 183 ++- cmd/server/server.go | 14 +- config/config.go | 12 +- datastore/service.go | 152 +- datastore/service_test.go | 1379 ++++++++++++++--- ext/datastore/bigquery/bigquery.go | 21 +- ext/datastore/bigquery/bigquery_test.go | 187 ++- ext/datastore/bigquery/mock.go | 56 + ext/datastore/bigquery/table.go | 132 +- ext/datastore/bigquery/table_test.go | 347 +++++ go.mod | 3 +- go.sum | 7 +- mock/backup.go | 28 + mock/datastore.go | 22 +- mock/job.go | 2 +- models/backup.go | 40 + models/datastore.go | 12 +- store/postgres/backup_repository.go | 117 ++ store/postgres/backup_repository_test.go | 131 ++ .../000014_create_backup_table.down.sql | 1 + .../000014_create_backup_table.up.sql | 8 + store/store.go | 6 + 29 files changed, 4908 insertions(+), 747 deletions(-) create mode 100644 mock/backup.go create mode 100644 store/postgres/backup_repository.go create mode 100644 store/postgres/backup_repository_test.go create mode 100644 
store/postgres/migrations/000014_create_backup_table.down.sql create mode 100644 store/postgres/migrations/000014_create_backup_table.up.sql diff --git a/Makefile b/Makefile index 273bc22ab7..329e97d76e 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ NAME = "github.com/odpf/optimus" LAST_COMMIT := $(shell git rev-parse --short HEAD) LAST_TAG := "$(shell git rev-list --tags --max-count=1)" OPMS_VERSION := "$(shell git describe --tags ${LAST_TAG})-next" -PROTON_COMMIT := "ed46a61c267dc47e91d163fbd8f3f3367f272d23" +PROTON_COMMIT := "e3b3bee44b27c5cdfb9276ccda86af4a462614c3" all: build diff --git a/api/handler/v1/runtime.go b/api/handler/v1/runtime.go index cc50e02836..14123a943c 100644 --- a/api/handler/v1/runtime.go +++ b/api/handler/v1/runtime.go @@ -961,7 +961,16 @@ func (sv *RuntimeServiceServer) BackupDryRun(ctx context.Context, req *pb.Backup jobSpecs = append(jobSpecs, downstreamSpecs...) } - resourcesToBackup, err := sv.resourceSvc.BackupResourceDryRun(ctx, projectSpec, namespaceSpec, jobSpecs) + //should add config + backupRequest := models.BackupRequest{ + ResourceName: req.ResourceName, + Project: projectSpec, + Namespace: namespaceSpec, + Description: req.Description, + IgnoreDownstream: req.IgnoreDownstream, + DryRun: true, + } + resourcesToBackup, err := sv.resourceSvc.BackupResourceDryRun(ctx, backupRequest, jobSpecs) if err != nil { return nil, status.Errorf(codes.Internal, "error while doing backup dry run: %v", err) } @@ -971,6 +980,82 @@ func (sv *RuntimeServiceServer) BackupDryRun(ctx context.Context, req *pb.Backup }, nil } +func (sv *RuntimeServiceServer) Backup(ctx context.Context, req *pb.BackupRequest) (*pb.BackupResponse, error) { + projectSpec, err := sv.getProjectSpec(req.ProjectName) + if err != nil { + return nil, err + } + + namespaceRepo := sv.namespaceRepoFactory.New(projectSpec) + namespaceSpec, err := namespaceRepo.GetByName(req.Namespace) + if err != nil { + return nil, status.Errorf(codes.NotFound, "%s: namespace %s not 
found", err.Error(), req.Namespace) + } + + resourceSpec, err := sv.resourceSvc.ReadResource(ctx, namespaceSpec, req.DatastoreName, req.ResourceName) + if err != nil { + return nil, status.Errorf(codes.Internal, "%s: failed to read resource %s", err.Error(), req.ResourceName) + } + + var jobSpecs []models.JobSpec + jobSpec, err := sv.jobSvc.GetByDestination(projectSpec, resourceSpec.URN) + if err != nil { + return nil, status.Errorf(codes.Internal, "error while getting job: %v", err) + } + jobSpecs = append(jobSpecs, jobSpec) + + if !req.IgnoreDownstream { + downstreamSpecs, err := sv.jobSvc.GetDownstream(ctx, projectSpec, jobSpec.Name) + if err != nil { + return nil, status.Errorf(codes.Internal, "error while getting job downstream: %v", err) + } + jobSpecs = append(jobSpecs, downstreamSpecs...) + } + + backupRequest := models.BackupRequest{ + ResourceName: req.ResourceName, + Project: projectSpec, + Namespace: namespaceSpec, + Description: req.Description, + IgnoreDownstream: req.IgnoreDownstream, + DryRun: false, + Config: req.Config, + } + results, err := sv.resourceSvc.BackupResource(ctx, backupRequest, jobSpecs) + if err != nil { + return nil, status.Errorf(codes.Internal, "error while doing backup: %v", err) + } + + return &pb.BackupResponse{ + Urn: results, + }, nil +} + +func (sv *RuntimeServiceServer) ListBackups(ctx context.Context, req *pb.ListBackupsRequest) (*pb.ListBackupsResponse, error) { + projectSpec, err := sv.getProjectSpec(req.ProjectName) + if err != nil { + return nil, err + } + + results, err := sv.resourceSvc.ListBackupResources(projectSpec, req.DatastoreName) + if err != nil { + return nil, status.Errorf(codes.Internal, "error while getting backup list: %v", err) + } + + var backupList []*pb.BackupSpec + for _, result := range results { + backupList = append(backupList, &pb.BackupSpec{ + Id: result.ID.String(), + ResourceName: result.Resource.Name, + CreatedAt: timestamppb.New(result.CreatedAt), + Description: result.Description, + }) + } 
+ return &pb.ListBackupsResponse{ + Backups: backupList, + }, nil +} + func (sv *RuntimeServiceServer) RunJob(ctx context.Context, req *pb.RunJobRequest) (*pb.RunJobResponse, error) { // create job run in db projSpec, err := sv.projectRepoFactory.New().GetByName(req.ProjectName) diff --git a/api/handler/v1/runtime_test.go b/api/handler/v1/runtime_test.go index e73abb91a6..587e8f013c 100644 --- a/api/handler/v1/runtime_test.go +++ b/api/handler/v1/runtime_test.go @@ -2558,7 +2558,14 @@ func TestRuntimeServiceServer(t *testing.T) { defer jobService.AssertExpectations(t) jobService.On("GetByDestination", projectSpec, resourceUrn).Return(jobSpec, nil) - resourceSvc.On("BackupResourceDryRun", context.Background(), projectSpec, namespaceSpec, []models.JobSpec{jobSpec}).Return([]string{resourceName}, nil) + backupRequest := models.BackupRequest{ + ResourceName: resourceName, + Project: projectSpec, + Namespace: namespaceSpec, + IgnoreDownstream: true, + DryRun: true, + } + resourceSvc.On("BackupResourceDryRun", context.Background(), backupRequest, []models.JobSpec{jobSpec}).Return([]string{resourceName}, nil) runtimeServiceServer := v1.NewRuntimeServiceServer( log, @@ -2649,9 +2656,15 @@ func TestRuntimeServiceServer(t *testing.T) { jobService := new(mock.JobService) defer jobService.AssertExpectations(t) jobService.On("GetByDestination", projectSpec, resourceUrn).Return(jobSpec, nil) - jobService.On("GetDownstream", projectSpec, jobSpec.Name).Return(jobSpecDownstreams, nil) + jobService.On("GetDownstream", context.Background(), projectSpec, jobSpec.Name).Return(jobSpecDownstreams, nil) - resourceSvc.On("BackupResourceDryRun", context.Background(), projectSpec, namespaceSpec, []models.JobSpec{jobSpec, jobSpecDownstreams[0], jobSpecDownstreams[1]}).Return([]string{resourceUrn, resourceDownstream1Urn, resourceDownstream2Urn}, nil) + backupRequest := models.BackupRequest{ + ResourceName: resourceName, + Project: projectSpec, + Namespace: namespaceSpec, + DryRun: true, + } + 
resourceSvc.On("BackupResourceDryRun", context.Background(), backupRequest, []models.JobSpec{jobSpec, jobSpecDownstreams[0], jobSpecDownstreams[1]}).Return([]string{resourceUrn, resourceDownstream1Urn, resourceDownstream2Urn}, nil) runtimeServiceServer := v1.NewRuntimeServiceServer( log, @@ -2926,7 +2939,7 @@ func TestRuntimeServiceServer(t *testing.T) { defer jobService.AssertExpectations(t) jobService.On("GetByDestination", projectSpec, resourceUrn).Return(jobSpec, nil) errorMsg := "unable to get jobspec downstream" - jobService.On("GetDownstream", projectSpec, jobSpec.Name).Return([]models.JobSpec{}, errors.New(errorMsg)) + jobService.On("GetDownstream", context.Background(), projectSpec, jobSpec.Name).Return([]models.JobSpec{}, errors.New(errorMsg)) runtimeServiceServer := v1.NewRuntimeServiceServer( log, @@ -3003,10 +3016,16 @@ func TestRuntimeServiceServer(t *testing.T) { jobService := new(mock.JobService) defer jobService.AssertExpectations(t) jobService.On("GetByDestination", projectSpec, resourceUrn).Return(jobSpec, nil) - jobService.On("GetDownstream", projectSpec, jobSpec.Name).Return([]models.JobSpec{}, nil) + jobService.On("GetDownstream", context.Background(), projectSpec, jobSpec.Name).Return([]models.JobSpec{}, nil) + backupRequest := models.BackupRequest{ + ResourceName: resourceName, + Project: projectSpec, + Namespace: namespaceSpec, + DryRun: true, + } errorMsg := "unable to get jobspec" - resourceSvc.On("BackupResourceDryRun", context.Background(), projectSpec, namespaceSpec, []models.JobSpec{jobSpec}).Return([]string{}, errors.New(errorMsg)) + resourceSvc.On("BackupResourceDryRun", context.Background(), backupRequest, []models.JobSpec{jobSpec}).Return([]string{}, errors.New(errorMsg)) runtimeServiceServer := v1.NewRuntimeServiceServer( log, @@ -3034,4 +3053,748 @@ func TestRuntimeServiceServer(t *testing.T) { assert.Nil(t, backupResponse) }) }) + + t.Run("Backup", func(t *testing.T) { + projectName := "a-data-project" + projectSpec := 
models.ProjectSpec{ + ID: uuid.Must(uuid.NewRandom()), + Name: projectName, + } + namespaceSpec := models.NamespaceSpec{ + ID: uuid.Must(uuid.NewRandom()), + Name: "dev-test-namespace-1", + Config: map[string]string{ + "bucket": "gs://some_folder", + }, + ProjectSpec: projectSpec, + } + resourceName := "a-data-project:dataset.table" + resourceUrn := "datastore://a-data-project:dataset.table" + backupUrn := "datastore://a-data-project:optimus_backup.table_backup" + + t.Run("should able to do backup ignoring downstream", func(t *testing.T) { + projectRepository := new(mock.ProjectRepository) + defer projectRepository.AssertExpectations(t) + + projectRepoFactory := new(mock.ProjectRepoFactory) + defer projectRepoFactory.AssertExpectations(t) + + namespaceRepository := new(mock.NamespaceRepository) + defer namespaceRepository.AssertExpectations(t) + + namespaceRepoFact := new(mock.NamespaceRepoFactory) + defer namespaceRepoFact.AssertExpectations(t) + + resourceSvc := new(mock.DatastoreService) + defer resourceSvc.AssertExpectations(t) + + jobService := new(mock.JobService) + defer jobService.AssertExpectations(t) + + jobName := "a-data-job" + jobSpec := models.JobSpec{ + ID: uuid.Must(uuid.NewRandom()), + Name: jobName, + Task: models.JobSpecTask{ + Config: models.JobSpecConfigs{ + { + Name: "do", + Value: "this", + }, + }, + }, + Assets: *models.JobAssets{}.New( + []models.JobSpecAsset{ + { + Name: "query.sql", + Value: "select * from 1", + }, + }), + } + resourceSpec := models.ResourceSpec{ + Name: resourceName, + URN: resourceUrn, + } + backupRequestPb := pb.BackupRequest{ + ProjectName: projectName, + DatastoreName: models.DestinationTypeBigquery.String(), + ResourceName: resourceName, + Namespace: namespaceSpec.Name, + IgnoreDownstream: true, + Config: map[string]string{ + "TTL": "30", + }, + } + backupReq := models.BackupRequest{ + ResourceName: resourceName, + Project: projectSpec, + Namespace: namespaceSpec, + Config: map[string]string{ + "TTL": "30", + }, + 
DryRun: false, + IgnoreDownstream: true, + } + backupResponsePb := &pb.BackupResponse{ + Urn: []string{backupUrn}, + } + + projectRepository.On("GetByName", projectName).Return(projectSpec, nil) + projectRepoFactory.On("New").Return(projectRepository) + + namespaceRepository.On("GetByName", namespaceSpec.Name).Return(namespaceSpec, nil) + namespaceRepoFact.On("New", projectSpec).Return(namespaceRepository) + + resourceSvc.On("ReadResource", context.Background(), namespaceSpec, models.DestinationTypeBigquery.String(), resourceName).Return(resourceSpec, nil) + jobService.On("GetByDestination", projectSpec, resourceUrn).Return(jobSpec, nil) + resourceSvc.On("BackupResource", context.Background(), backupReq, []models.JobSpec{jobSpec}).Return([]string{backupUrn}, nil) + + runtimeServiceServer := v1.NewRuntimeServiceServer( + log, + "Version", + jobService, nil, + resourceSvc, + projectRepoFactory, + namespaceRepoFact, + nil, + nil, + nil, + nil, + nil, + ) + backupResponse, err := runtimeServiceServer.Backup(context.Background(), &backupRequestPb) + + assert.Nil(t, err) + assert.Equal(t, backupResponsePb, backupResponse) + }) + t.Run("should return list of resources for backup with downstream", func(t *testing.T) { + projectRepository := new(mock.ProjectRepository) + defer projectRepository.AssertExpectations(t) + + projectRepoFactory := new(mock.ProjectRepoFactory) + defer projectRepoFactory.AssertExpectations(t) + + namespaceRepository := new(mock.NamespaceRepository) + defer namespaceRepository.AssertExpectations(t) + + namespaceRepoFact := new(mock.NamespaceRepoFactory) + defer namespaceRepoFact.AssertExpectations(t) + + resourceSvc := new(mock.DatastoreService) + defer resourceSvc.AssertExpectations(t) + + jobService := new(mock.JobService) + defer jobService.AssertExpectations(t) + + jobSpec := models.JobSpec{ + ID: uuid.Must(uuid.NewRandom()), + Name: "a-data-job", + } + jobSpecDownstreams := []models.JobSpec{ + { + ID: uuid.Must(uuid.NewRandom()), + Name: 
"b-data-job", + Task: models.JobSpecTask{ + Config: models.JobSpecConfigs{ + { + Name: "do", + Value: "this", + }, + }, + }, + }, + { + ID: uuid.Must(uuid.NewRandom()), + Name: "c-data-job", + Task: models.JobSpecTask{ + Config: models.JobSpecConfigs{ + { + Name: "do", + Value: "this", + }, + }, + }, + }, + } + resourceSpec := models.ResourceSpec{ + Name: resourceName, + URN: resourceUrn, + } + backupDownstream1Urn := "datastore://a-data-project:optimus_backup.downstream1" + backupDownstream2Urn := "datastore://a-data-project:optimus_backup.downstream2" + backupRequestPb := pb.BackupRequest{ + ProjectName: projectName, + DatastoreName: models.DestinationTypeBigquery.String(), + ResourceName: resourceName, + Namespace: namespaceSpec.Name, + IgnoreDownstream: false, + Config: map[string]string{ + "TTL": "30", + }, + } + backupReq := models.BackupRequest{ + ResourceName: resourceName, + Project: projectSpec, + Namespace: namespaceSpec, + Config: map[string]string{ + "TTL": "30", + }, + DryRun: false, + IgnoreDownstream: false, + } + backupResults := []string{backupUrn, backupDownstream1Urn, backupDownstream2Urn} + + projectRepository.On("GetByName", projectName).Return(projectSpec, nil) + projectRepoFactory.On("New").Return(projectRepository) + + namespaceRepository.On("GetByName", namespaceSpec.Name).Return(namespaceSpec, nil) + namespaceRepoFact.On("New", projectSpec).Return(namespaceRepository) + + resourceSvc.On("ReadResource", context.Background(), namespaceSpec, models.DestinationTypeBigquery.String(), resourceName).Return(resourceSpec, nil) + + jobService.On("GetByDestination", projectSpec, resourceUrn).Return(jobSpec, nil) + jobService.On("GetDownstream", context.Background(), projectSpec, jobSpec.Name).Return(jobSpecDownstreams, nil) + + resourceSvc.On("BackupResource", context.Background(), backupReq, []models.JobSpec{jobSpec, jobSpecDownstreams[0], jobSpecDownstreams[1]}).Return(backupResults, nil) + + runtimeServiceServer := v1.NewRuntimeServiceServer( + 
log, + "Version", + jobService, nil, + resourceSvc, + projectRepoFactory, + namespaceRepoFact, + nil, + nil, + nil, + nil, + nil, + ) + backupResponse, err := runtimeServiceServer.Backup(context.Background(), &backupRequestPb) + + assert.Nil(t, err) + assert.Equal(t, backupResults, backupResponse.Urn) + }) + t.Run("should return error when project is not found", func(t *testing.T) { + projectRepository := new(mock.ProjectRepository) + defer projectRepository.AssertExpectations(t) + + projectRepoFactory := new(mock.ProjectRepoFactory) + defer projectRepoFactory.AssertExpectations(t) + + namespaceRepoFact := new(mock.NamespaceRepoFactory) + defer namespaceRepoFact.AssertExpectations(t) + + jobService := new(mock.JobService) + defer jobService.AssertExpectations(t) + + resourceSvc := new(mock.DatastoreService) + defer resourceSvc.AssertExpectations(t) + + backupRequestPb := pb.BackupRequest{ + ProjectName: projectName, + DatastoreName: models.DestinationTypeBigquery.String(), + ResourceName: resourceName, + } + + projectRepoFactory.On("New").Return(projectRepository) + errorMsg := "unable to fetch project" + projectRepository.On("GetByName", projectName).Return(models.ProjectSpec{}, errors.New(errorMsg)) + + runtimeServiceServer := v1.NewRuntimeServiceServer( + log, + "Version", + jobService, nil, + resourceSvc, + projectRepoFactory, + namespaceRepoFact, + nil, + nil, + nil, + nil, + nil, + ) + backupResponse, err := runtimeServiceServer.Backup(context.Background(), &backupRequestPb) + + assert.Contains(t, err.Error(), errorMsg) + assert.Nil(t, backupResponse) + }) + t.Run("should return error when namespace is not found", func(t *testing.T) { + projectRepository := new(mock.ProjectRepository) + defer projectRepository.AssertExpectations(t) + + projectRepoFactory := new(mock.ProjectRepoFactory) + defer projectRepoFactory.AssertExpectations(t) + + namespaceRepository := new(mock.NamespaceRepository) + defer namespaceRepository.AssertExpectations(t) + + 
namespaceRepoFact := new(mock.NamespaceRepoFactory) + defer namespaceRepoFact.AssertExpectations(t) + + backupRequestPb := pb.BackupRequest{ + ProjectName: projectName, + DatastoreName: models.DestinationTypeBigquery.String(), + ResourceName: resourceName, + Namespace: namespaceSpec.Name, + } + + projectRepository.On("GetByName", projectName).Return(projectSpec, nil) + projectRepoFactory.On("New").Return(projectRepository) + errorMsg := "unable to get namespace" + namespaceRepository.On("GetByName", namespaceSpec.Name).Return(models.NamespaceSpec{}, errors.New(errorMsg)) + namespaceRepoFact.On("New", projectSpec).Return(namespaceRepository) + + runtimeServiceServer := v1.NewRuntimeServiceServer( + log, + "Version", + nil, + nil, + nil, + projectRepoFactory, + namespaceRepoFact, + nil, + nil, + nil, + nil, + nil, + ) + backupResponse, err := runtimeServiceServer.Backup(context.Background(), &backupRequestPb) + + assert.Contains(t, err.Error(), errorMsg) + assert.Nil(t, backupResponse) + }) + t.Run("should return error when unable to read resource", func(t *testing.T) { + projectRepository := new(mock.ProjectRepository) + defer projectRepository.AssertExpectations(t) + + projectRepoFactory := new(mock.ProjectRepoFactory) + defer projectRepoFactory.AssertExpectations(t) + + namespaceRepository := new(mock.NamespaceRepository) + defer namespaceRepository.AssertExpectations(t) + + namespaceRepoFact := new(mock.NamespaceRepoFactory) + defer namespaceRepoFact.AssertExpectations(t) + + resourceSvc := new(mock.DatastoreService) + defer resourceSvc.AssertExpectations(t) + + resourceSpec := models.ResourceSpec{ + Name: resourceName, + URN: resourceUrn, + } + backupRequestPb := pb.BackupRequest{ + ProjectName: projectName, + DatastoreName: models.DestinationTypeBigquery.String(), + ResourceName: resourceName, + Namespace: namespaceSpec.Name, + } + + projectRepository.On("GetByName", projectName).Return(projectSpec, nil) + projectRepoFactory.On("New").Return(projectRepository) 
+ namespaceRepository.On("GetByName", namespaceSpec.Name).Return(namespaceSpec, nil) + namespaceRepoFact.On("New", projectSpec).Return(namespaceRepository) + errorMsg := "unable to read resource" + resourceSvc.On("ReadResource", context.Background(), namespaceSpec, + models.DestinationTypeBigquery.String(), resourceName).Return(resourceSpec, errors.New(errorMsg)) + + runtimeServiceServer := v1.NewRuntimeServiceServer( + log, + "Version", + nil, nil, + resourceSvc, + projectRepoFactory, + namespaceRepoFact, + nil, + nil, + nil, + nil, + nil, + ) + backupResponse, err := runtimeServiceServer.Backup(context.Background(), &backupRequestPb) + + assert.Contains(t, err.Error(), errorMsg) + assert.Nil(t, backupResponse) + }) + t.Run("should return error when unable to get jobSpec", func(t *testing.T) { + projectRepository := new(mock.ProjectRepository) + defer projectRepository.AssertExpectations(t) + + projectRepoFactory := new(mock.ProjectRepoFactory) + defer projectRepoFactory.AssertExpectations(t) + + namespaceRepository := new(mock.NamespaceRepository) + defer namespaceRepository.AssertExpectations(t) + + namespaceRepoFact := new(mock.NamespaceRepoFactory) + defer namespaceRepoFact.AssertExpectations(t) + + resourceSvc := new(mock.DatastoreService) + defer resourceSvc.AssertExpectations(t) + + jobService := new(mock.JobService) + defer jobService.AssertExpectations(t) + + resourceSpec := models.ResourceSpec{ + Name: resourceName, + URN: resourceUrn, + } + backupRequestPb := pb.BackupRequest{ + ProjectName: projectName, + DatastoreName: models.DestinationTypeBigquery.String(), + ResourceName: resourceName, + Namespace: namespaceSpec.Name, + } + + projectRepository.On("GetByName", projectName).Return(projectSpec, nil) + projectRepoFactory.On("New").Return(projectRepository) + namespaceRepository.On("GetByName", namespaceSpec.Name).Return(namespaceSpec, nil) + namespaceRepoFact.On("New", projectSpec).Return(namespaceRepository) + resourceSvc.On("ReadResource", 
context.Background(), namespaceSpec, models.DestinationTypeBigquery.String(), resourceName).Return(resourceSpec, nil) + errorMsg := "unable to get jobspec" + jobService.On("GetByDestination", projectSpec, resourceUrn).Return(models.JobSpec{}, errors.New(errorMsg)) + + runtimeServiceServer := v1.NewRuntimeServiceServer( + log, + "Version", + jobService, nil, + resourceSvc, + projectRepoFactory, + namespaceRepoFact, + nil, + nil, + nil, + nil, + nil, + ) + backupResponse, err := runtimeServiceServer.Backup(context.Background(), &backupRequestPb) + + assert.Contains(t, err.Error(), errorMsg) + assert.Nil(t, backupResponse) + }) + t.Run("should return error when unable to get jobSpec downstream", func(t *testing.T) { + projectRepository := new(mock.ProjectRepository) + defer projectRepository.AssertExpectations(t) + + projectRepoFactory := new(mock.ProjectRepoFactory) + defer projectRepoFactory.AssertExpectations(t) + + namespaceRepository := new(mock.NamespaceRepository) + defer namespaceRepository.AssertExpectations(t) + + namespaceRepoFact := new(mock.NamespaceRepoFactory) + defer namespaceRepoFact.AssertExpectations(t) + + resourceSvc := new(mock.DatastoreService) + defer resourceSvc.AssertExpectations(t) + + jobService := new(mock.JobService) + defer jobService.AssertExpectations(t) + + jobName := "a-data-job" + jobSpec := models.JobSpec{ + ID: uuid.Must(uuid.NewRandom()), + Name: jobName, + Task: models.JobSpecTask{ + Config: models.JobSpecConfigs{ + { + Name: "do", + Value: "this", + }, + }, + }, + Assets: *models.JobAssets{}.New( + []models.JobSpecAsset{ + { + Name: "query.sql", + Value: "select * from 1", + }, + }), + } + resourceSpec := models.ResourceSpec{ + Name: resourceName, + URN: resourceUrn, + } + backupRequestPb := pb.BackupRequest{ + ProjectName: projectName, + DatastoreName: models.DestinationTypeBigquery.String(), + ResourceName: resourceName, + Namespace: namespaceSpec.Name, + } + + projectRepository.On("GetByName", 
projectName).Return(projectSpec, nil) + projectRepoFactory.On("New").Return(projectRepository) + namespaceRepository.On("GetByName", namespaceSpec.Name).Return(namespaceSpec, nil) + namespaceRepoFact.On("New", projectSpec).Return(namespaceRepository) + resourceSvc.On("ReadResource", context.Background(), namespaceSpec, models.DestinationTypeBigquery.String(), resourceName).Return(resourceSpec, nil) + jobService.On("GetByDestination", projectSpec, resourceUrn).Return(jobSpec, nil) + errorMsg := "unable to get jobspec downstream" + jobService.On("GetDownstream", context.Background(), projectSpec, jobSpec.Name).Return([]models.JobSpec{}, errors.New(errorMsg)) + + runtimeServiceServer := v1.NewRuntimeServiceServer( + log, + "Version", + jobService, nil, + resourceSvc, + projectRepoFactory, + namespaceRepoFact, + nil, + nil, + nil, + nil, + nil, + ) + backupResponse, err := runtimeServiceServer.Backup(context.Background(), &backupRequestPb) + + assert.Contains(t, err.Error(), errorMsg) + assert.Nil(t, backupResponse) + }) + t.Run("should return error when unable to do backup", func(t *testing.T) { + projectRepository := new(mock.ProjectRepository) + defer projectRepository.AssertExpectations(t) + + projectRepoFactory := new(mock.ProjectRepoFactory) + defer projectRepoFactory.AssertExpectations(t) + + namespaceRepository := new(mock.NamespaceRepository) + defer namespaceRepository.AssertExpectations(t) + + namespaceRepoFact := new(mock.NamespaceRepoFactory) + defer namespaceRepoFact.AssertExpectations(t) + + resourceSvc := new(mock.DatastoreService) + defer resourceSvc.AssertExpectations(t) + + jobService := new(mock.JobService) + defer jobService.AssertExpectations(t) + + jobName := "a-data-job" + jobSpec := models.JobSpec{ + ID: uuid.Must(uuid.NewRandom()), + Name: jobName, + Task: models.JobSpecTask{ + Config: models.JobSpecConfigs{ + { + Name: "do", + Value: "this", + }, + }, + }, + Assets: *models.JobAssets{}.New( + []models.JobSpecAsset{ + { + Name: "query.sql", + 
Value: "select * from 1", + }, + }), + } + backupReq := models.BackupRequest{ + ResourceName: resourceName, + Project: projectSpec, + Namespace: namespaceSpec, + Config: map[string]string{ + "TTL": "30", + }, + DryRun: false, + IgnoreDownstream: false, + } + resourceSpec := models.ResourceSpec{ + Name: resourceName, + URN: resourceUrn, + } + backupRequestPb := pb.BackupRequest{ + ProjectName: projectName, + DatastoreName: models.DestinationTypeBigquery.String(), + ResourceName: resourceName, + Namespace: namespaceSpec.Name, + Config: map[string]string{ + "TTL": "30", + }, + } + + projectRepository.On("GetByName", projectName).Return(projectSpec, nil) + projectRepoFactory.On("New").Return(projectRepository) + namespaceRepository.On("GetByName", namespaceSpec.Name).Return(namespaceSpec, nil) + namespaceRepoFact.On("New", projectSpec).Return(namespaceRepository) + resourceSvc.On("ReadResource", context.Background(), namespaceSpec, models.DestinationTypeBigquery.String(), resourceName).Return(resourceSpec, nil) + jobService.On("GetByDestination", projectSpec, resourceUrn).Return(jobSpec, nil) + jobService.On("GetDownstream", context.Background(), projectSpec, jobSpec.Name).Return([]models.JobSpec{}, nil) + errorMsg := "unable to get jobspec" + resourceSvc.On("BackupResource", context.Background(), backupReq, []models.JobSpec{jobSpec}).Return([]string{}, errors.New(errorMsg)) + + runtimeServiceServer := v1.NewRuntimeServiceServer( + log, + "Version", + jobService, nil, + resourceSvc, + projectRepoFactory, + namespaceRepoFact, + nil, + nil, + nil, + nil, + nil, + ) + backupResponse, err := runtimeServiceServer.Backup(context.Background(), &backupRequestPb) + + assert.Contains(t, err.Error(), errorMsg) + assert.Nil(t, backupResponse) + }) + }) + + t.Run("ListBackups", func(t *testing.T) { + projectName := "a-data-project" + projectSpec := models.ProjectSpec{ + ID: uuid.Must(uuid.NewRandom()), + Name: projectName, + } + datastoreName := 
models.DestinationTypeBigquery.String() + namespaceSpec := models.NamespaceSpec{ + ID: uuid.Must(uuid.NewRandom()), + Name: "dev-test-namespace-1", + Config: map[string]string{ + "bucket": "gs://some_folder", + }, + ProjectSpec: projectSpec, + } + listBackupsReq := pb.ListBackupsRequest{ + ProjectName: projectName, + DatastoreName: datastoreName, + Namespace: namespaceSpec.Name, + } + backupSpecs := []models.BackupSpec{ + { + ID: uuid.Must(uuid.NewRandom()), + CreatedAt: time.Now().Add(time.Hour * 24 * -30), + Resource: models.ResourceSpec{ + Name: "sample resource", + }, + Description: "backup purpose", + }, + { + ID: uuid.Must(uuid.NewRandom()), + CreatedAt: time.Now().Add(time.Hour * 24 * -50), + Resource: models.ResourceSpec{ + Name: "sample resource", + }, + Description: "backup purpose", + }, + } + t.Run("should return list of backups", func(t *testing.T) { + projectRepository := new(mock.ProjectRepository) + defer projectRepository.AssertExpectations(t) + + projectRepoFactory := new(mock.ProjectRepoFactory) + defer projectRepoFactory.AssertExpectations(t) + + resourceSvc := new(mock.DatastoreService) + defer resourceSvc.AssertExpectations(t) + + backupResultPb := &pb.ListBackupsResponse{ + Backups: []*pb.BackupSpec{ + { + Id: backupSpecs[0].ID.String(), + ResourceName: backupSpecs[0].Resource.Name, + CreatedAt: timestamppb.New(backupSpecs[0].CreatedAt), + Description: backupSpecs[0].Description, + }, + { + Id: backupSpecs[1].ID.String(), + ResourceName: backupSpecs[1].Resource.Name, + CreatedAt: timestamppb.New(backupSpecs[1].CreatedAt), + Description: backupSpecs[1].Description, + }, + }, + } + + projectRepoFactory.On("New").Return(projectRepository) + projectRepository.On("GetByName", projectName).Return(projectSpec, nil) + resourceSvc.On("ListBackupResources", projectSpec, datastoreName).Return(backupSpecs, nil) + + runtimeServiceServer := v1.NewRuntimeServiceServer( + log, + "Version", + nil, nil, + resourceSvc, + projectRepoFactory, + nil, + nil, + nil, 
+ nil, + nil, + nil, + ) + backupResponse, err := runtimeServiceServer.ListBackups(context.Background(), &listBackupsReq) + + assert.Nil(t, err) + assert.Equal(t, backupResultPb, backupResponse) + }) + t.Run("should return error when unable to get project spec", func(t *testing.T) { + projectRepository := new(mock.ProjectRepository) + defer projectRepository.AssertExpectations(t) + + projectRepoFactory := new(mock.ProjectRepoFactory) + defer projectRepoFactory.AssertExpectations(t) + + resourceSvc := new(mock.DatastoreService) + defer resourceSvc.AssertExpectations(t) + + projectRepoFactory.On("New").Return(projectRepository) + errorMsg := "unable to get project spec" + projectRepository.On("GetByName", projectName).Return(models.ProjectSpec{}, + errors.New(errorMsg)) + + runtimeServiceServer := v1.NewRuntimeServiceServer( + log, + "Version", + nil, nil, + resourceSvc, + projectRepoFactory, + nil, + nil, + nil, + nil, + nil, + nil, + ) + backupResponse, err := runtimeServiceServer.ListBackups(context.Background(), &listBackupsReq) + + assert.Contains(t, err.Error(), errorMsg) + assert.Nil(t, backupResponse) + }) + t.Run("should return error when unable to get list of backups", func(t *testing.T) { + projectRepository := new(mock.ProjectRepository) + defer projectRepository.AssertExpectations(t) + + projectRepoFactory := new(mock.ProjectRepoFactory) + defer projectRepoFactory.AssertExpectations(t) + + resourceSvc := new(mock.DatastoreService) + defer resourceSvc.AssertExpectations(t) + + projectRepoFactory.On("New").Return(projectRepository) + projectRepository.On("GetByName", projectName).Return(projectSpec, nil) + errorMsg := "unable to get list of backups" + resourceSvc.On("ListBackupResources", projectSpec, datastoreName).Return([]models.BackupSpec{}, errors.New(errorMsg)) + + runtimeServiceServer := v1.NewRuntimeServiceServer( + log, + "Version", + nil, nil, + resourceSvc, + projectRepoFactory, + nil, + nil, + nil, + nil, + nil, + nil, + ) + backupResponse, err 
:= runtimeServiceServer.ListBackups(context.Background(), &listBackupsReq) + + assert.Contains(t, err.Error(), errorMsg) + assert.Nil(t, backupResponse) + }) + }) } diff --git a/api/proto/odpf/optimus/runtime_service.pb.go b/api/proto/odpf/optimus/runtime_service.pb.go index 7db0d3dde1..0d612789e9 100644 --- a/api/proto/odpf/optimus/runtime_service.pb.go +++ b/api/proto/odpf/optimus/runtime_service.pb.go @@ -4808,6 +4808,329 @@ func (x *BackupDryRunResponse) GetResourceName() []string { return nil } +type BackupRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ProjectName string `protobuf:"bytes,1,opt,name=project_name,json=projectName,proto3" json:"project_name,omitempty"` + DatastoreName string `protobuf:"bytes,2,opt,name=datastore_name,json=datastoreName,proto3" json:"datastore_name,omitempty"` + ResourceName string `protobuf:"bytes,3,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` + Namespace string `protobuf:"bytes,4,opt,name=namespace,proto3" json:"namespace,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + IgnoreDownstream bool `protobuf:"varint,6,opt,name=ignore_downstream,json=ignoreDownstream,proto3" json:"ignore_downstream,omitempty"` + Config map[string]string `protobuf:"bytes,7,rep,name=config,proto3" json:"config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *BackupRequest) Reset() { + *x = BackupRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_odpf_optimus_runtime_service_proto_msgTypes[75] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BackupRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackupRequest) ProtoMessage() {} + +func (x *BackupRequest) ProtoReflect() protoreflect.Message { + mi := 
&file_odpf_optimus_runtime_service_proto_msgTypes[75] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackupRequest.ProtoReflect.Descriptor instead. +func (*BackupRequest) Descriptor() ([]byte, []int) { + return file_odpf_optimus_runtime_service_proto_rawDescGZIP(), []int{75} +} + +func (x *BackupRequest) GetProjectName() string { + if x != nil { + return x.ProjectName + } + return "" +} + +func (x *BackupRequest) GetDatastoreName() string { + if x != nil { + return x.DatastoreName + } + return "" +} + +func (x *BackupRequest) GetResourceName() string { + if x != nil { + return x.ResourceName + } + return "" +} + +func (x *BackupRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *BackupRequest) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *BackupRequest) GetIgnoreDownstream() bool { + if x != nil { + return x.IgnoreDownstream + } + return false +} + +func (x *BackupRequest) GetConfig() map[string]string { + if x != nil { + return x.Config + } + return nil +} + +type BackupResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Urn []string `protobuf:"bytes,1,rep,name=urn,proto3" json:"urn,omitempty"` +} + +func (x *BackupResponse) Reset() { + *x = BackupResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_odpf_optimus_runtime_service_proto_msgTypes[76] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BackupResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackupResponse) ProtoMessage() {} + +func (x *BackupResponse) ProtoReflect() protoreflect.Message { + mi := &file_odpf_optimus_runtime_service_proto_msgTypes[76] + if 
protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackupResponse.ProtoReflect.Descriptor instead. +func (*BackupResponse) Descriptor() ([]byte, []int) { + return file_odpf_optimus_runtime_service_proto_rawDescGZIP(), []int{76} +} + +func (x *BackupResponse) GetUrn() []string { + if x != nil { + return x.Urn + } + return nil +} + +type ListBackupsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ProjectName string `protobuf:"bytes,1,opt,name=project_name,json=projectName,proto3" json:"project_name,omitempty"` + DatastoreName string `protobuf:"bytes,2,opt,name=datastore_name,json=datastoreName,proto3" json:"datastore_name,omitempty"` + Namespace string `protobuf:"bytes,3,opt,name=namespace,proto3" json:"namespace,omitempty"` +} + +func (x *ListBackupsRequest) Reset() { + *x = ListBackupsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_odpf_optimus_runtime_service_proto_msgTypes[77] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListBackupsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListBackupsRequest) ProtoMessage() {} + +func (x *ListBackupsRequest) ProtoReflect() protoreflect.Message { + mi := &file_odpf_optimus_runtime_service_proto_msgTypes[77] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListBackupsRequest.ProtoReflect.Descriptor instead. 
+func (*ListBackupsRequest) Descriptor() ([]byte, []int) { + return file_odpf_optimus_runtime_service_proto_rawDescGZIP(), []int{77} +} + +func (x *ListBackupsRequest) GetProjectName() string { + if x != nil { + return x.ProjectName + } + return "" +} + +func (x *ListBackupsRequest) GetDatastoreName() string { + if x != nil { + return x.DatastoreName + } + return "" +} + +func (x *ListBackupsRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +type ListBackupsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Backups []*BackupSpec `protobuf:"bytes,1,rep,name=backups,proto3" json:"backups,omitempty"` +} + +func (x *ListBackupsResponse) Reset() { + *x = ListBackupsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_odpf_optimus_runtime_service_proto_msgTypes[78] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListBackupsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListBackupsResponse) ProtoMessage() {} + +func (x *ListBackupsResponse) ProtoReflect() protoreflect.Message { + mi := &file_odpf_optimus_runtime_service_proto_msgTypes[78] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListBackupsResponse.ProtoReflect.Descriptor instead. 
+func (*ListBackupsResponse) Descriptor() ([]byte, []int) { + return file_odpf_optimus_runtime_service_proto_rawDescGZIP(), []int{78} +} + +func (x *ListBackupsResponse) GetBackups() []*BackupSpec { + if x != nil { + return x.Backups + } + return nil +} + +type BackupSpec struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *BackupSpec) Reset() { + *x = BackupSpec{} + if protoimpl.UnsafeEnabled { + mi := &file_odpf_optimus_runtime_service_proto_msgTypes[79] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BackupSpec) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackupSpec) ProtoMessage() {} + +func (x *BackupSpec) ProtoReflect() protoreflect.Message { + mi := &file_odpf_optimus_runtime_service_proto_msgTypes[79] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackupSpec.ProtoReflect.Descriptor instead. 
+func (*BackupSpec) Descriptor() ([]byte, []int) { + return file_odpf_optimus_runtime_service_proto_rawDescGZIP(), []int{79} +} + +func (x *BackupSpec) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *BackupSpec) GetResourceName() string { + if x != nil { + return x.ResourceName + } + return "" +} + +func (x *BackupSpec) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +func (x *BackupSpec) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + type ProjectSpecification_ProjectSecret struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -4820,7 +5143,7 @@ type ProjectSpecification_ProjectSecret struct { func (x *ProjectSpecification_ProjectSecret) Reset() { *x = ProjectSpecification_ProjectSecret{} if protoimpl.UnsafeEnabled { - mi := &file_odpf_optimus_runtime_service_proto_msgTypes[76] + mi := &file_odpf_optimus_runtime_service_proto_msgTypes[81] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4833,7 +5156,7 @@ func (x *ProjectSpecification_ProjectSecret) String() string { func (*ProjectSpecification_ProjectSecret) ProtoMessage() {} func (x *ProjectSpecification_ProjectSecret) ProtoReflect() protoreflect.Message { - mi := &file_odpf_optimus_runtime_service_proto_msgTypes[76] + mi := &file_odpf_optimus_runtime_service_proto_msgTypes[81] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4875,7 +5198,7 @@ type JobSpecification_Behavior struct { func (x *JobSpecification_Behavior) Reset() { *x = JobSpecification_Behavior{} if protoimpl.UnsafeEnabled { - mi := &file_odpf_optimus_runtime_service_proto_msgTypes[80] + mi := &file_odpf_optimus_runtime_service_proto_msgTypes[85] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4888,7 +5211,7 @@ func (x *JobSpecification_Behavior) String() string 
{ func (*JobSpecification_Behavior) ProtoMessage() {} func (x *JobSpecification_Behavior) ProtoReflect() protoreflect.Message { - mi := &file_odpf_optimus_runtime_service_proto_msgTypes[80] + mi := &file_odpf_optimus_runtime_service_proto_msgTypes[85] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4932,7 +5255,7 @@ type JobSpecification_Behavior_Retry struct { func (x *JobSpecification_Behavior_Retry) Reset() { *x = JobSpecification_Behavior_Retry{} if protoimpl.UnsafeEnabled { - mi := &file_odpf_optimus_runtime_service_proto_msgTypes[81] + mi := &file_odpf_optimus_runtime_service_proto_msgTypes[86] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4945,7 +5268,7 @@ func (x *JobSpecification_Behavior_Retry) String() string { func (*JobSpecification_Behavior_Retry) ProtoMessage() {} func (x *JobSpecification_Behavior_Retry) ProtoReflect() protoreflect.Message { - mi := &file_odpf_optimus_runtime_service_proto_msgTypes[81] + mi := &file_odpf_optimus_runtime_service_proto_msgTypes[86] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4996,7 +5319,7 @@ type JobSpecification_Behavior_Notifiers struct { func (x *JobSpecification_Behavior_Notifiers) Reset() { *x = JobSpecification_Behavior_Notifiers{} if protoimpl.UnsafeEnabled { - mi := &file_odpf_optimus_runtime_service_proto_msgTypes[82] + mi := &file_odpf_optimus_runtime_service_proto_msgTypes[87] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5009,7 +5332,7 @@ func (x *JobSpecification_Behavior_Notifiers) String() string { func (*JobSpecification_Behavior_Notifiers) ProtoMessage() {} func (x *JobSpecification_Behavior_Notifiers) ProtoReflect() protoreflect.Message { - mi := &file_odpf_optimus_runtime_service_proto_msgTypes[82] + mi := 
&file_odpf_optimus_runtime_service_proto_msgTypes[87] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5766,298 +6089,365 @@ var file_odpf_optimus_runtime_service_proto_rawDesc = []byte{ 0x6b, 0x75, 0x70, 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x32, 0x9a, 0x23, 0x0a, 0x0e, 0x52, 0x75, 0x6e, 0x74, 0x69, - 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5e, 0x0a, 0x07, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, - 0x6d, 0x75, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, - 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x22, 0x0b, 0x2f, 0x76, 0x31, 0x2f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x01, 0x2a, 0x12, 0x77, 0x0a, 0x16, 0x44, 0x65, 0x70, - 0x6c, 0x6f, 0x79, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, - 0x75, 0x73, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x2c, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, - 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 
0x6e, 0x73, 0x65, 0x22, 0x00, - 0x30, 0x01, 0x12, 0xb4, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x62, - 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x2e, - 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x6f, 0x64, 0x70, - 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x39, - 0x22, 0x34, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x3a, 0x01, 0x2a, 0x12, 0xb6, 0x01, 0x0a, 0x14, 0x52, 0x65, - 0x61, 0x64, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, - 0x73, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, - 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x61, - 0x64, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x47, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x41, 0x12, 0x3f, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, - 0x70, 
0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x2f, 0x7b, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x7d, 0x12, 0xbc, 0x01, 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4a, 0x6f, 0x62, - 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x2e, - 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x6f, 0x64, 0x70, - 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x47, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x41, - 0x2a, 0x3f, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x2f, 0x7b, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x7d, 0x12, 0x95, 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, + 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xe7, 0x02, 0x0a, 0x0d, 0x42, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x64, + 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 
0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, + 0x65, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x10, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x12, 0x3f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6d, 0x75, 0x73, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x39, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x22, 0x0a, 0x0e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x03, 
0x28, 0x09, 0x52, + 0x03, 0x75, 0x72, 0x6e, 0x22, 0x7c, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, + 0x0e, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x22, 0x49, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x62, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x64, 0x70, + 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x53, 0x70, 0x65, 0x63, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x22, 0x9e, 0x01, + 0x0a, 0x0a, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x23, 0x0a, 0x0d, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x09, 
0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x20, 0x0a, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0xd7, + 0x25, 0x0a, 0x0e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x5e, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x6f, + 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6f, 0x64, 0x70, + 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x10, 0x22, 0x0b, 0x2f, 0x76, 0x31, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x01, + 0x2a, 0x12, 0x77, 0x0a, 0x16, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x4a, 0x6f, 0x62, 0x53, 0x70, + 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x2e, 0x6f, 0x64, + 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, + 0x79, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, + 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x4a, 0x6f, + 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0xb4, 0x01, 0x0a, 0x16, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, + 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x43, 0x72, 0x65, 0x61, 
0x74, 0x65, 0x4a, 0x6f, 0x62, 0x53, 0x70, + 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, + 0x73, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x3f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x39, 0x22, 0x34, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x3a, 0x01, + 0x2a, 0x12, 0xb6, 0x01, 0x0a, 0x14, 0x52, 0x65, 0x61, 0x64, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x6f, 0x64, 0x70, - 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4a, 0x6f, + 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, - 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, + 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x12, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x70, + 0x65, 0x22, 0x47, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x41, 0x12, 0x3f, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, - 0x6e, 
0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x12, 0xa5, 0x01, 0x0a, 0x14, 0x44, 0x75, - 0x6d, 0x70, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, + 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x2f, + 0x7b, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0xbc, 0x01, 0x0a, 0x16, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, + 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x53, 0x70, + 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, + 0x73, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x47, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x41, 0x2a, 0x3f, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x2f, 0x7b, + 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0x95, 0x01, 0x0a, 0x14, 0x4c, 0x69, + 0x73, 0x74, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, - 0x73, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4a, 0x6f, 
0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, - 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x44, 0x75, 0x6d, - 0x70, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x36, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x30, 0x12, 0x2e, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, + 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x20, 0x12, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6a, 0x6f, - 0x62, 0x2f, 0x7b, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x64, 0x75, 0x6d, - 0x70, 0x12, 0x9e, 0x01, 0x0a, 0x15, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x4a, 0x6f, 0x62, 0x53, 0x70, - 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, 0x6f, 0x64, - 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, - 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x4a, 0x6f, 0x62, 0x53, - 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x26, 0x22, 0x24, 0x2f, 0x76, - 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 
0x65, - 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x2f, 0x63, 0x68, 0x65, - 0x63, 0x6b, 0x12, 0x77, 0x0a, 0x16, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x4a, 0x6f, 0x62, 0x53, 0x70, - 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2b, 0x2e, 0x6f, - 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x6f, 0x64, 0x70, 0x66, - 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x4a, 0x6f, - 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x76, 0x0a, 0x0f, 0x52, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x24, - 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, - 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, - 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x10, 0x22, 0x0b, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x3a, 0x01, 0x2a, 0x12, 0xaa, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, - 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x2d, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, + 0x62, 0x12, 0xa5, 0x01, 0x0a, 0x14, 0x44, 0x75, 0x6d, 0x70, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 0x63, 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x6f, 0x64, 0x70, + 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x4a, 0x6f, + 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, + 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x36, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x30, 0x12, 0x2e, 0x2f, 0x76, 0x31, 0x2f, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x2f, 0x7b, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x64, 0x75, 0x6d, 0x70, 0x12, 0x9e, 0x01, 0x0a, 0x15, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, + 0x75, 0x73, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2b, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2c, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x26, 0x22, 0x24, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, + 0x2f, 0x6a, 0x6f, 0x62, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x77, 0x0a, 0x16, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 
0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2b, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6d, 0x75, 0x73, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2c, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, + 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x30, 0x01, 0x12, 0x76, 0x0a, 0x0f, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x24, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, + 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x6f, + 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x22, 0x0b, 0x2f, 0x76, 0x31, + 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3a, 0x01, 0x2a, 0x12, 0xaa, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x2e, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x2f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x22, 0x24, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, - 0x6a, 
0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x3a, 0x01, 0x2a, - 0x12, 0x97, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x53, 0x65, 0x63, - 0x72, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, - 0x75, 0x73, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x53, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2d, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, - 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3a, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x22, 0x2f, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x7d, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x2f, 0x7b, 0x73, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x3a, 0x01, 0x2a, 0x12, 0x6a, 0x0a, 0x0c, 0x4c, 0x69, - 0x73, 0x74, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x21, 0x2e, 0x6f, 0x64, 0x70, - 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, - 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4c, 0x69, 0x73, - 0x74, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x13, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0d, 0x12, 0x0b, 0x2f, 0x76, 0x31, 0x2f, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x9e, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x50, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 
0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, - 0x12, 0x2a, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, - 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x6f, - 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2c, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x26, 0x12, 0x24, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0xa0, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x25, 0x2e, 0x6f, - 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, - 0x75, 0x73, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3d, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x37, 0x22, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, - 0x6a, 0x6f, 0x62, 0x2f, 0x7b, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x69, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0x86, 0x01, 0x0a, 0x09, 0x4a, - 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1e, 0x2e, 0x6f, 0x64, 0x70, 0x66, 
0x2e, - 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, - 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x32, 0x12, 0x30, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6a, 0x6f, - 0x62, 0x2f, 0x7b, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0xb3, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, - 0x4a, 0x6f, 0x62, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x25, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, - 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, - 0x4a, 0x6f, 0x62, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x26, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4a, 0x6f, 0x62, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x50, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x4a, 0x22, - 0x45, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x22, + 0x24, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 
0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x2f, 0x7b, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, - 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x3a, 0x01, 0x2a, 0x12, 0x60, 0x0a, 0x09, 0x47, 0x65, 0x74, - 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x1e, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, - 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, - 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x12, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0c, 0x12, - 0x0a, 0x2f, 0x76, 0x31, 0x2f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x86, 0x01, 0x0a, 0x1b, - 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, - 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x2e, 0x6f, 0x64, - 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, - 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, - 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x44, 0x65, 0x70, - 0x6c, 0x6f, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x65, 0x63, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x30, 0x01, 0x12, 0xda, 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x2e, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 
0x70, 0x74, 0x69, 0x6d, 0x75, - 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, - 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, - 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, - 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x5c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x56, 0x12, 0x54, 0x2f, 0x76, 0x31, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0x97, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x67, + 0x69, 0x73, 0x74, 0x65, 0x72, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x6f, 0x64, + 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x65, 0x72, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x24, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, + 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x22, 0x2f, + 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x2f, 0x7b, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x3a, + 0x01, 0x2a, 0x12, 0x6a, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x12, 0x21, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, + 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, + 0x69, 
0x6d, 0x75, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x13, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x0d, 0x12, 0x0b, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x9e, + 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x2a, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, + 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6d, 0x75, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x2c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x26, 0x12, 0x24, 0x2f, 0x76, 0x31, 0x2f, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0xa0, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x12, 0x25, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x6f, 0x64, + 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x3d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x22, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 
0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x64, 0x61, - 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x7b, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, - 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x12, 0xbc, 0x01, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x12, 0x23, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, - 0x6d, 0x75, 0x73, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6f, 0x64, 0x70, 0x66, - 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x5f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x59, 0x22, 0x54, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x2f, 0x7b, 0x6a, 0x6f, 0x62, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x3a, + 0x01, 0x2a, 0x12, 0x86, 0x01, 0x0a, 0x09, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x1e, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, + 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1f, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, + 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x38, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x2f, 0x76, 0x31, 0x2f, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 
0x74, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x2f, 0x7b, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0xb3, 0x01, 0x0a, 0x10, + 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4a, 0x6f, 0x62, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x12, 0x25, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, + 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4a, 0x6f, 0x62, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4a, + 0x6f, 0x62, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x50, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x4a, 0x22, 0x45, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x7b, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, - 0x6f, 0x72, 0x65, 0x2f, 0x7b, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3a, 0x01, 0x2a, - 0x12, 0xc3, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x12, 0x21, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, - 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, - 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x66, - 0x12, 0x64, 0x2f, 0x76, 0x31, 
0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x7b, 0x64, 0x61, - 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0xbc, 0x01, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x23, 0x2e, 0x6f, 0x64, 0x70, 0x66, - 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, - 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x59, 0x1a, 0x54, 0x2f, 0x76, - 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x64, - 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x7b, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, - 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, - 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x12, 0x21, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, - 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 
0x79, 0x44, 0x72, 0x79, 0x52, - 0x75, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x6f, 0x64, 0x70, 0x66, - 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x44, - 0x72, 0x79, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x33, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x2d, 0x22, 0x28, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x7d, 0x2f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x2f, 0x64, 0x72, 0x79, 0x72, 0x75, 0x6e, 0x3a, - 0x01, 0x2a, 0x12, 0x71, 0x0a, 0x06, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x12, 0x1b, 0x2e, 0x6f, - 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x70, 0x6c, - 0x61, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x6f, 0x64, 0x70, 0x66, - 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x26, 0x22, - 0x21, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x70, 0x6c, - 0x61, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x8e, 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x61, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x24, 0x2e, 0x6f, 0x64, 0x70, 0x66, - 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, - 0x61, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x25, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x47, - 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x28, 0x12, 0x26, - 0x2f, 
0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x70, 0x6c, 0x61, - 0x79, 0x2f, 0x7b, 0x69, 0x64, 0x7d, 0x12, 0x7d, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, - 0x70, 0x6c, 0x61, 0x79, 0x73, 0x12, 0x20, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, - 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, - 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x61, - 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x23, 0x12, 0x21, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, - 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x72, - 0x65, 0x70, 0x6c, 0x61, 0x79, 0x12, 0xd4, 0x01, 0x0a, 0x0c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x12, 0x21, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, - 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x44, 0x72, 0x79, 0x52, - 0x75, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x6f, 0x64, 0x70, 0x66, - 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x44, - 0x72, 0x79, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7d, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x77, 0x22, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, - 0x65, 0x2f, 0x7b, 0x64, 0x61, 0x74, 0x61, 
0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x7b, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x2f, 0x64, 0x72, 0x79, 0x72, 0x75, 0x6e, 0x3a, 0x01, 0x2a, 0x12, 0x84, 0x01, 0x0a, - 0x06, 0x52, 0x75, 0x6e, 0x4a, 0x6f, 0x62, 0x12, 0x1b, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, - 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x75, 0x6e, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, - 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x75, 0x6e, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x3f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x39, 0x22, 0x34, 0x2f, 0x76, 0x31, 0x2f, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x2f, 0x7b, 0x6a, + 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x3a, 0x01, + 0x2a, 0x12, 0x60, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x1e, + 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x47, 0x65, + 0x74, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, + 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x47, 0x65, + 0x74, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x12, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0c, 0x12, 0x0a, 0x2f, 0x76, 0x31, 0x2f, 0x77, 0x69, 0x6e, + 0x64, 0x6f, 0x77, 0x12, 0x86, 0x01, 0x0a, 0x1b, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, + 0x75, 0x73, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x52, 0x65, 0x73, 0x6f, 
0x75, 0x72, 0x63, + 0x65, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, + 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0xda, 0x01, 0x0a, + 0x19, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x2e, 0x6f, 0x64, 0x70, + 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x6f, 0x64, 0x70, + 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5c, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x56, 0x12, 0x54, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, + 0x7b, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, + 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0xbc, 0x01, 0x0a, 0x0e, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x23, 0x2e, 0x6f, + 0x64, 0x70, 0x66, 
0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x24, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, + 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x59, 0x22, + 0x54, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x7b, 0x64, 0x61, 0x74, + 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0xc3, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x61, + 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x21, 0x2e, 0x6f, 0x64, 0x70, 0x66, + 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x6f, + 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x61, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x6c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x66, 0x12, 0x64, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x7b, 0x64, 0x61, 0x74, 
0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x7b, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0xbc, + 0x01, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x12, 0x23, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, + 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5f, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x59, 0x1a, 0x54, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, + 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x2f, 0x7b, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x7d, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0x8a, 0x01, + 0x0a, 0x0c, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x12, 0x21, + 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, + 0x70, 0x6c, 0x61, 0x79, 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x22, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, + 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x33, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2d, 0x22, 0x28, 
0x2f, + 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x79, + 0x2f, 0x64, 0x72, 0x79, 0x72, 0x75, 0x6e, 0x3a, 0x01, 0x2a, 0x12, 0x71, 0x0a, 0x06, 0x52, 0x65, + 0x70, 0x6c, 0x61, 0x79, 0x12, 0x1b, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1c, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, + 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x2c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x26, 0x22, 0x21, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x8e, 0x01, + 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x24, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, + 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2e, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x28, 0x12, 0x26, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x2f, 0x7b, 0x69, 0x64, 0x7d, 0x12, 0x7d, + 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x73, 0x12, 0x20, 0x2e, + 0x6f, 0x64, 0x70, 0x66, 0x2e, 
0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x21, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x72, 0x75, 0x6e, - 0x3a, 0x01, 0x2a, 0x42, 0x86, 0x01, 0x0a, 0x16, 0x69, 0x6f, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x6e, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x42, 0x15, - 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x50, 0x01, 0x5a, 0x1e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x6f, 0x64, 0x70, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x6e, 0x2f, - 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x92, 0x41, 0x32, 0x12, 0x05, 0x32, 0x03, 0x30, 0x2e, - 0x31, 0x1a, 0x0e, 0x31, 0x32, 0x37, 0x2e, 0x30, 0x2e, 0x30, 0x2e, 0x31, 0x3a, 0x39, 0x31, 0x30, - 0x30, 0x22, 0x04, 0x2f, 0x61, 0x70, 0x69, 0x2a, 0x01, 0x01, 0x72, 0x10, 0x0a, 0x0e, 0x4f, 0x70, - 0x74, 0x69, 0x6d, 0x75, 0x73, 0x20, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x12, 0xbb, 0x01, + 0x0a, 0x0c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x12, 0x21, + 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x44, 0x72, 0x79, 
0x52, 0x75, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x22, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, + 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x5e, 0x22, 0x59, 0x2f, + 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, + 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x7b, 0x64, 0x61, 0x74, 0x61, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x2d, 0x64, 0x72, 0x79, 0x72, 0x75, 0x6e, 0x3a, 0x01, 0x2a, 0x12, 0xa2, 0x01, 0x0a, 0x06, + 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x1b, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, + 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, + 0x75, 0x73, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x5d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x57, 0x22, 0x52, 0x2f, 0x76, 0x31, 0x2f, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, + 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x7b, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x3a, 0x01, 0x2a, + 0x12, 0xae, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 
0x75, 0x70, 0x73, + 0x12, 0x20, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, + 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x54, 0x12, 0x52, 0x2f, + 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, + 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x7b, 0x64, 0x61, 0x74, 0x61, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x12, 0x84, 0x01, 0x0a, 0x06, 0x52, 0x75, 0x6e, 0x4a, 0x6f, 0x62, 0x12, 0x1b, 0x2e, 0x6f, + 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x75, 0x6e, 0x4a, + 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x6f, 0x64, 0x70, 0x66, + 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x75, 0x6e, 0x4a, 0x6f, 0x62, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x39, 0x22, + 0x34, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x7d, 0x2f, 0x72, 0x75, 0x6e, 0x3a, 0x01, 0x2a, 0x42, 0x86, 0x01, 0x0a, 0x16, 0x69, 0x6f, 0x2e, + 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x6e, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6d, 0x75, 0x73, 
0x42, 0x15, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x50, 0x01, 0x5a, 0x1e, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6f, 0x64, 0x70, 0x66, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x6e, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x92, 0x41, 0x32, 0x12, + 0x05, 0x32, 0x03, 0x30, 0x2e, 0x31, 0x1a, 0x0e, 0x31, 0x32, 0x37, 0x2e, 0x30, 0x2e, 0x30, 0x2e, + 0x31, 0x3a, 0x39, 0x31, 0x30, 0x30, 0x22, 0x04, 0x2f, 0x61, 0x70, 0x69, 0x2a, 0x01, 0x01, 0x72, + 0x10, 0x0a, 0x0e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x20, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -6073,7 +6463,7 @@ func file_odpf_optimus_runtime_service_proto_rawDescGZIP() []byte { } var file_odpf_optimus_runtime_service_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_odpf_optimus_runtime_service_proto_msgTypes = make([]protoimpl.MessageInfo, 88) +var file_odpf_optimus_runtime_service_proto_msgTypes = make([]protoimpl.MessageInfo, 94) var file_odpf_optimus_runtime_service_proto_goTypes = []interface{}{ (InstanceSpec_Type)(0), // 0: odpf.optimus.InstanceSpec.Type (InstanceSpecData_Type)(0), // 1: odpf.optimus.InstanceSpecData.Type @@ -6153,156 +6543,169 @@ var file_odpf_optimus_runtime_service_proto_goTypes = []interface{}{ (*RunJobResponse)(nil), // 75: odpf.optimus.RunJobResponse (*BackupDryRunRequest)(nil), // 76: odpf.optimus.BackupDryRunRequest (*BackupDryRunResponse)(nil), // 77: odpf.optimus.BackupDryRunResponse - nil, // 78: odpf.optimus.ProjectSpecification.ConfigEntry - (*ProjectSpecification_ProjectSecret)(nil), // 79: odpf.optimus.ProjectSpecification.ProjectSecret - nil, // 80: odpf.optimus.NamespaceSpecification.ConfigEntry - nil, // 81: odpf.optimus.JobSpecification.AssetsEntry - nil, // 82: odpf.optimus.JobSpecification.LabelsEntry - (*JobSpecification_Behavior)(nil), // 83: odpf.optimus.JobSpecification.Behavior 
- (*JobSpecification_Behavior_Retry)(nil), // 84: odpf.optimus.JobSpecification.Behavior.Retry - (*JobSpecification_Behavior_Notifiers)(nil), // 85: odpf.optimus.JobSpecification.Behavior.Notifiers - nil, // 86: odpf.optimus.JobSpecification.Behavior.Notifiers.ConfigEntry - nil, // 87: odpf.optimus.InstanceContext.EnvsEntry - nil, // 88: odpf.optimus.InstanceContext.FilesEntry - nil, // 89: odpf.optimus.ResourceSpecification.AssetsEntry - nil, // 90: odpf.optimus.ResourceSpecification.LabelsEntry - (*timestamppb.Timestamp)(nil), // 91: google.protobuf.Timestamp - (*structpb.Struct)(nil), // 92: google.protobuf.Struct - (*durationpb.Duration)(nil), // 93: google.protobuf.Duration + (*BackupRequest)(nil), // 78: odpf.optimus.BackupRequest + (*BackupResponse)(nil), // 79: odpf.optimus.BackupResponse + (*ListBackupsRequest)(nil), // 80: odpf.optimus.ListBackupsRequest + (*ListBackupsResponse)(nil), // 81: odpf.optimus.ListBackupsResponse + (*BackupSpec)(nil), // 82: odpf.optimus.BackupSpec + nil, // 83: odpf.optimus.ProjectSpecification.ConfigEntry + (*ProjectSpecification_ProjectSecret)(nil), // 84: odpf.optimus.ProjectSpecification.ProjectSecret + nil, // 85: odpf.optimus.NamespaceSpecification.ConfigEntry + nil, // 86: odpf.optimus.JobSpecification.AssetsEntry + nil, // 87: odpf.optimus.JobSpecification.LabelsEntry + (*JobSpecification_Behavior)(nil), // 88: odpf.optimus.JobSpecification.Behavior + (*JobSpecification_Behavior_Retry)(nil), // 89: odpf.optimus.JobSpecification.Behavior.Retry + (*JobSpecification_Behavior_Notifiers)(nil), // 90: odpf.optimus.JobSpecification.Behavior.Notifiers + nil, // 91: odpf.optimus.JobSpecification.Behavior.Notifiers.ConfigEntry + nil, // 92: odpf.optimus.InstanceContext.EnvsEntry + nil, // 93: odpf.optimus.InstanceContext.FilesEntry + nil, // 94: odpf.optimus.ResourceSpecification.AssetsEntry + nil, // 95: odpf.optimus.ResourceSpecification.LabelsEntry + nil, // 96: odpf.optimus.BackupRequest.ConfigEntry + 
(*timestamppb.Timestamp)(nil), // 97: google.protobuf.Timestamp + (*structpb.Struct)(nil), // 98: google.protobuf.Struct + (*durationpb.Duration)(nil), // 99: google.protobuf.Duration } var file_odpf_optimus_runtime_service_proto_depIdxs = []int32{ - 78, // 0: odpf.optimus.ProjectSpecification.config:type_name -> odpf.optimus.ProjectSpecification.ConfigEntry - 79, // 1: odpf.optimus.ProjectSpecification.secrets:type_name -> odpf.optimus.ProjectSpecification.ProjectSecret - 80, // 2: odpf.optimus.NamespaceSpecification.config:type_name -> odpf.optimus.NamespaceSpecification.ConfigEntry - 7, // 3: odpf.optimus.JobSpecHook.config:type_name -> odpf.optimus.JobConfigItem - 7, // 4: odpf.optimus.JobSpecification.config:type_name -> odpf.optimus.JobConfigItem - 8, // 5: odpf.optimus.JobSpecification.dependencies:type_name -> odpf.optimus.JobDependency - 81, // 6: odpf.optimus.JobSpecification.assets:type_name -> odpf.optimus.JobSpecification.AssetsEntry - 5, // 7: odpf.optimus.JobSpecification.hooks:type_name -> odpf.optimus.JobSpecHook - 82, // 8: odpf.optimus.JobSpecification.labels:type_name -> odpf.optimus.JobSpecification.LabelsEntry - 83, // 9: odpf.optimus.JobSpecification.behavior:type_name -> odpf.optimus.JobSpecification.Behavior - 10, // 10: odpf.optimus.InstanceSpec.data:type_name -> odpf.optimus.InstanceSpecData - 91, // 11: odpf.optimus.InstanceSpec.executed_at:type_name -> google.protobuf.Timestamp - 0, // 12: odpf.optimus.InstanceSpec.type:type_name -> odpf.optimus.InstanceSpec.Type - 1, // 13: odpf.optimus.InstanceSpecData.type:type_name -> odpf.optimus.InstanceSpecData.Type - 87, // 14: odpf.optimus.InstanceContext.envs:type_name -> odpf.optimus.InstanceContext.EnvsEntry - 88, // 15: odpf.optimus.InstanceContext.files:type_name -> odpf.optimus.InstanceContext.FilesEntry - 91, // 16: odpf.optimus.JobStatus.scheduled_at:type_name -> google.protobuf.Timestamp - 2, // 17: odpf.optimus.JobEvent.type:type_name -> odpf.optimus.JobEvent.Type - 92, // 18: 
odpf.optimus.JobEvent.value:type_name -> google.protobuf.Struct - 93, // 19: odpf.optimus.TaskWindow.size:type_name -> google.protobuf.Duration - 93, // 20: odpf.optimus.TaskWindow.offset:type_name -> google.protobuf.Duration - 92, // 21: odpf.optimus.ResourceSpecification.spec:type_name -> google.protobuf.Struct - 89, // 22: odpf.optimus.ResourceSpecification.assets:type_name -> odpf.optimus.ResourceSpecification.AssetsEntry - 90, // 23: odpf.optimus.ResourceSpecification.labels:type_name -> odpf.optimus.ResourceSpecification.LabelsEntry - 6, // 24: odpf.optimus.DeployJobSpecificationRequest.jobs:type_name -> odpf.optimus.JobSpecification - 6, // 25: odpf.optimus.ListJobSpecificationResponse.jobs:type_name -> odpf.optimus.JobSpecification - 6, // 26: odpf.optimus.CheckJobSpecificationRequest.job:type_name -> odpf.optimus.JobSpecification - 6, // 27: odpf.optimus.CheckJobSpecificationsRequest.jobs:type_name -> odpf.optimus.JobSpecification - 3, // 28: odpf.optimus.RegisterProjectRequest.project:type_name -> odpf.optimus.ProjectSpecification - 4, // 29: odpf.optimus.RegisterProjectRequest.namespace:type_name -> odpf.optimus.NamespaceSpecification - 4, // 30: odpf.optimus.RegisterProjectNamespaceRequest.namespace:type_name -> odpf.optimus.NamespaceSpecification - 6, // 31: odpf.optimus.CreateJobSpecificationRequest.spec:type_name -> odpf.optimus.JobSpecification - 6, // 32: odpf.optimus.ReadJobSpecificationResponse.spec:type_name -> odpf.optimus.JobSpecification - 3, // 33: odpf.optimus.ListProjectsResponse.projects:type_name -> odpf.optimus.ProjectSpecification - 4, // 34: odpf.optimus.ListProjectNamespacesResponse.namespaces:type_name -> odpf.optimus.NamespaceSpecification - 91, // 35: odpf.optimus.RegisterInstanceRequest.scheduled_at:type_name -> google.protobuf.Timestamp - 0, // 36: odpf.optimus.RegisterInstanceRequest.instance_type:type_name -> odpf.optimus.InstanceSpec.Type - 3, // 37: odpf.optimus.RegisterInstanceResponse.project:type_name -> 
odpf.optimus.ProjectSpecification - 4, // 38: odpf.optimus.RegisterInstanceResponse.namespace:type_name -> odpf.optimus.NamespaceSpecification - 6, // 39: odpf.optimus.RegisterInstanceResponse.job:type_name -> odpf.optimus.JobSpecification - 9, // 40: odpf.optimus.RegisterInstanceResponse.instance:type_name -> odpf.optimus.InstanceSpec - 11, // 41: odpf.optimus.RegisterInstanceResponse.context:type_name -> odpf.optimus.InstanceContext - 12, // 42: odpf.optimus.JobStatusResponse.statuses:type_name -> odpf.optimus.JobStatus - 91, // 43: odpf.optimus.GetWindowRequest.scheduled_at:type_name -> google.protobuf.Timestamp - 91, // 44: odpf.optimus.GetWindowResponse.start:type_name -> google.protobuf.Timestamp - 91, // 45: odpf.optimus.GetWindowResponse.end:type_name -> google.protobuf.Timestamp - 15, // 46: odpf.optimus.DeployResourceSpecificationRequest.resources:type_name -> odpf.optimus.ResourceSpecification - 15, // 47: odpf.optimus.ListResourceSpecificationResponse.resources:type_name -> odpf.optimus.ResourceSpecification - 15, // 48: odpf.optimus.CreateResourceRequest.resource:type_name -> odpf.optimus.ResourceSpecification - 15, // 49: odpf.optimus.ReadResourceResponse.resource:type_name -> odpf.optimus.ResourceSpecification - 15, // 50: odpf.optimus.UpdateResourceRequest.resource:type_name -> odpf.optimus.ResourceSpecification - 64, // 51: odpf.optimus.ReplayDryRunResponse.response:type_name -> odpf.optimus.ReplayExecutionTreeNode - 64, // 52: odpf.optimus.ReplayExecutionTreeNode.dependents:type_name -> odpf.optimus.ReplayExecutionTreeNode - 91, // 53: odpf.optimus.ReplayExecutionTreeNode.runs:type_name -> google.protobuf.Timestamp - 66, // 54: odpf.optimus.GetReplayStatusResponse.response:type_name -> odpf.optimus.ReplayStatusTreeNode - 66, // 55: odpf.optimus.ReplayStatusTreeNode.dependents:type_name -> odpf.optimus.ReplayStatusTreeNode - 67, // 56: odpf.optimus.ReplayStatusTreeNode.runs:type_name -> odpf.optimus.ReplayStatusRun - 91, // 57: 
odpf.optimus.ReplayStatusRun.run:type_name -> google.protobuf.Timestamp - 13, // 58: odpf.optimus.RegisterJobEventRequest.event:type_name -> odpf.optimus.JobEvent - 73, // 59: odpf.optimus.ListReplaysResponse.replay_list:type_name -> odpf.optimus.ReplaySpec - 91, // 60: odpf.optimus.ReplaySpec.start_date:type_name -> google.protobuf.Timestamp - 91, // 61: odpf.optimus.ReplaySpec.end_date:type_name -> google.protobuf.Timestamp - 91, // 62: odpf.optimus.ReplaySpec.created_at:type_name -> google.protobuf.Timestamp - 6, // 63: odpf.optimus.RunJobRequest.specifications:type_name -> odpf.optimus.JobSpecification - 84, // 64: odpf.optimus.JobSpecification.Behavior.retry:type_name -> odpf.optimus.JobSpecification.Behavior.Retry - 85, // 65: odpf.optimus.JobSpecification.Behavior.notify:type_name -> odpf.optimus.JobSpecification.Behavior.Notifiers - 93, // 66: odpf.optimus.JobSpecification.Behavior.Retry.delay:type_name -> google.protobuf.Duration - 2, // 67: odpf.optimus.JobSpecification.Behavior.Notifiers.on:type_name -> odpf.optimus.JobEvent.Type - 86, // 68: odpf.optimus.JobSpecification.Behavior.Notifiers.config:type_name -> odpf.optimus.JobSpecification.Behavior.Notifiers.ConfigEntry - 16, // 69: odpf.optimus.RuntimeService.Version:input_type -> odpf.optimus.VersionRequest - 18, // 70: odpf.optimus.RuntimeService.DeployJobSpecification:input_type -> odpf.optimus.DeployJobSpecificationRequest - 32, // 71: odpf.optimus.RuntimeService.CreateJobSpecification:input_type -> odpf.optimus.CreateJobSpecificationRequest - 34, // 72: odpf.optimus.RuntimeService.ReadJobSpecification:input_type -> odpf.optimus.ReadJobSpecificationRequest - 36, // 73: odpf.optimus.RuntimeService.DeleteJobSpecification:input_type -> odpf.optimus.DeleteJobSpecificationRequest - 20, // 74: odpf.optimus.RuntimeService.ListJobSpecification:input_type -> odpf.optimus.ListJobSpecificationRequest - 22, // 75: odpf.optimus.RuntimeService.DumpJobSpecification:input_type -> 
odpf.optimus.DumpJobSpecificationRequest - 24, // 76: odpf.optimus.RuntimeService.CheckJobSpecification:input_type -> odpf.optimus.CheckJobSpecificationRequest - 26, // 77: odpf.optimus.RuntimeService.CheckJobSpecifications:input_type -> odpf.optimus.CheckJobSpecificationsRequest - 28, // 78: odpf.optimus.RuntimeService.RegisterProject:input_type -> odpf.optimus.RegisterProjectRequest - 30, // 79: odpf.optimus.RuntimeService.RegisterProjectNamespace:input_type -> odpf.optimus.RegisterProjectNamespaceRequest - 38, // 80: odpf.optimus.RuntimeService.RegisterSecret:input_type -> odpf.optimus.RegisterSecretRequest - 40, // 81: odpf.optimus.RuntimeService.ListProjects:input_type -> odpf.optimus.ListProjectsRequest - 42, // 82: odpf.optimus.RuntimeService.ListProjectNamespaces:input_type -> odpf.optimus.ListProjectNamespacesRequest - 44, // 83: odpf.optimus.RuntimeService.RegisterInstance:input_type -> odpf.optimus.RegisterInstanceRequest - 46, // 84: odpf.optimus.RuntimeService.JobStatus:input_type -> odpf.optimus.JobStatusRequest - 69, // 85: odpf.optimus.RuntimeService.RegisterJobEvent:input_type -> odpf.optimus.RegisterJobEventRequest - 48, // 86: odpf.optimus.RuntimeService.GetWindow:input_type -> odpf.optimus.GetWindowRequest - 50, // 87: odpf.optimus.RuntimeService.DeployResourceSpecification:input_type -> odpf.optimus.DeployResourceSpecificationRequest - 52, // 88: odpf.optimus.RuntimeService.ListResourceSpecification:input_type -> odpf.optimus.ListResourceSpecificationRequest - 54, // 89: odpf.optimus.RuntimeService.CreateResource:input_type -> odpf.optimus.CreateResourceRequest - 56, // 90: odpf.optimus.RuntimeService.ReadResource:input_type -> odpf.optimus.ReadResourceRequest - 58, // 91: odpf.optimus.RuntimeService.UpdateResource:input_type -> odpf.optimus.UpdateResourceRequest - 62, // 92: odpf.optimus.RuntimeService.ReplayDryRun:input_type -> odpf.optimus.ReplayDryRunRequest - 60, // 93: odpf.optimus.RuntimeService.Replay:input_type -> 
odpf.optimus.ReplayRequest - 68, // 94: odpf.optimus.RuntimeService.GetReplayStatus:input_type -> odpf.optimus.GetReplayStatusRequest - 71, // 95: odpf.optimus.RuntimeService.ListReplays:input_type -> odpf.optimus.ListReplaysRequest - 76, // 96: odpf.optimus.RuntimeService.BackupDryRun:input_type -> odpf.optimus.BackupDryRunRequest - 74, // 97: odpf.optimus.RuntimeService.RunJob:input_type -> odpf.optimus.RunJobRequest - 17, // 98: odpf.optimus.RuntimeService.Version:output_type -> odpf.optimus.VersionResponse - 19, // 99: odpf.optimus.RuntimeService.DeployJobSpecification:output_type -> odpf.optimus.DeployJobSpecificationResponse - 33, // 100: odpf.optimus.RuntimeService.CreateJobSpecification:output_type -> odpf.optimus.CreateJobSpecificationResponse - 35, // 101: odpf.optimus.RuntimeService.ReadJobSpecification:output_type -> odpf.optimus.ReadJobSpecificationResponse - 37, // 102: odpf.optimus.RuntimeService.DeleteJobSpecification:output_type -> odpf.optimus.DeleteJobSpecificationResponse - 21, // 103: odpf.optimus.RuntimeService.ListJobSpecification:output_type -> odpf.optimus.ListJobSpecificationResponse - 23, // 104: odpf.optimus.RuntimeService.DumpJobSpecification:output_type -> odpf.optimus.DumpJobSpecificationResponse - 25, // 105: odpf.optimus.RuntimeService.CheckJobSpecification:output_type -> odpf.optimus.CheckJobSpecificationResponse - 27, // 106: odpf.optimus.RuntimeService.CheckJobSpecifications:output_type -> odpf.optimus.CheckJobSpecificationsResponse - 29, // 107: odpf.optimus.RuntimeService.RegisterProject:output_type -> odpf.optimus.RegisterProjectResponse - 31, // 108: odpf.optimus.RuntimeService.RegisterProjectNamespace:output_type -> odpf.optimus.RegisterProjectNamespaceResponse - 39, // 109: odpf.optimus.RuntimeService.RegisterSecret:output_type -> odpf.optimus.RegisterSecretResponse - 41, // 110: odpf.optimus.RuntimeService.ListProjects:output_type -> odpf.optimus.ListProjectsResponse - 43, // 111: 
odpf.optimus.RuntimeService.ListProjectNamespaces:output_type -> odpf.optimus.ListProjectNamespacesResponse - 45, // 112: odpf.optimus.RuntimeService.RegisterInstance:output_type -> odpf.optimus.RegisterInstanceResponse - 47, // 113: odpf.optimus.RuntimeService.JobStatus:output_type -> odpf.optimus.JobStatusResponse - 70, // 114: odpf.optimus.RuntimeService.RegisterJobEvent:output_type -> odpf.optimus.RegisterJobEventResponse - 49, // 115: odpf.optimus.RuntimeService.GetWindow:output_type -> odpf.optimus.GetWindowResponse - 51, // 116: odpf.optimus.RuntimeService.DeployResourceSpecification:output_type -> odpf.optimus.DeployResourceSpecificationResponse - 53, // 117: odpf.optimus.RuntimeService.ListResourceSpecification:output_type -> odpf.optimus.ListResourceSpecificationResponse - 55, // 118: odpf.optimus.RuntimeService.CreateResource:output_type -> odpf.optimus.CreateResourceResponse - 57, // 119: odpf.optimus.RuntimeService.ReadResource:output_type -> odpf.optimus.ReadResourceResponse - 59, // 120: odpf.optimus.RuntimeService.UpdateResource:output_type -> odpf.optimus.UpdateResourceResponse - 63, // 121: odpf.optimus.RuntimeService.ReplayDryRun:output_type -> odpf.optimus.ReplayDryRunResponse - 61, // 122: odpf.optimus.RuntimeService.Replay:output_type -> odpf.optimus.ReplayResponse - 65, // 123: odpf.optimus.RuntimeService.GetReplayStatus:output_type -> odpf.optimus.GetReplayStatusResponse - 72, // 124: odpf.optimus.RuntimeService.ListReplays:output_type -> odpf.optimus.ListReplaysResponse - 77, // 125: odpf.optimus.RuntimeService.BackupDryRun:output_type -> odpf.optimus.BackupDryRunResponse - 75, // 126: odpf.optimus.RuntimeService.RunJob:output_type -> odpf.optimus.RunJobResponse - 98, // [98:127] is the sub-list for method output_type - 69, // [69:98] is the sub-list for method input_type - 69, // [69:69] is the sub-list for extension type_name - 69, // [69:69] is the sub-list for extension extendee - 0, // [0:69] is the sub-list for field type_name + 83, 
// 0: odpf.optimus.ProjectSpecification.config:type_name -> odpf.optimus.ProjectSpecification.ConfigEntry + 84, // 1: odpf.optimus.ProjectSpecification.secrets:type_name -> odpf.optimus.ProjectSpecification.ProjectSecret + 85, // 2: odpf.optimus.NamespaceSpecification.config:type_name -> odpf.optimus.NamespaceSpecification.ConfigEntry + 7, // 3: odpf.optimus.JobSpecHook.config:type_name -> odpf.optimus.JobConfigItem + 7, // 4: odpf.optimus.JobSpecification.config:type_name -> odpf.optimus.JobConfigItem + 8, // 5: odpf.optimus.JobSpecification.dependencies:type_name -> odpf.optimus.JobDependency + 86, // 6: odpf.optimus.JobSpecification.assets:type_name -> odpf.optimus.JobSpecification.AssetsEntry + 5, // 7: odpf.optimus.JobSpecification.hooks:type_name -> odpf.optimus.JobSpecHook + 87, // 8: odpf.optimus.JobSpecification.labels:type_name -> odpf.optimus.JobSpecification.LabelsEntry + 88, // 9: odpf.optimus.JobSpecification.behavior:type_name -> odpf.optimus.JobSpecification.Behavior + 10, // 10: odpf.optimus.InstanceSpec.data:type_name -> odpf.optimus.InstanceSpecData + 97, // 11: odpf.optimus.InstanceSpec.executed_at:type_name -> google.protobuf.Timestamp + 0, // 12: odpf.optimus.InstanceSpec.type:type_name -> odpf.optimus.InstanceSpec.Type + 1, // 13: odpf.optimus.InstanceSpecData.type:type_name -> odpf.optimus.InstanceSpecData.Type + 92, // 14: odpf.optimus.InstanceContext.envs:type_name -> odpf.optimus.InstanceContext.EnvsEntry + 93, // 15: odpf.optimus.InstanceContext.files:type_name -> odpf.optimus.InstanceContext.FilesEntry + 97, // 16: odpf.optimus.JobStatus.scheduled_at:type_name -> google.protobuf.Timestamp + 2, // 17: odpf.optimus.JobEvent.type:type_name -> odpf.optimus.JobEvent.Type + 98, // 18: odpf.optimus.JobEvent.value:type_name -> google.protobuf.Struct + 99, // 19: odpf.optimus.TaskWindow.size:type_name -> google.protobuf.Duration + 99, // 20: odpf.optimus.TaskWindow.offset:type_name -> google.protobuf.Duration + 98, // 21: 
odpf.optimus.ResourceSpecification.spec:type_name -> google.protobuf.Struct + 94, // 22: odpf.optimus.ResourceSpecification.assets:type_name -> odpf.optimus.ResourceSpecification.AssetsEntry + 95, // 23: odpf.optimus.ResourceSpecification.labels:type_name -> odpf.optimus.ResourceSpecification.LabelsEntry + 6, // 24: odpf.optimus.DeployJobSpecificationRequest.jobs:type_name -> odpf.optimus.JobSpecification + 6, // 25: odpf.optimus.ListJobSpecificationResponse.jobs:type_name -> odpf.optimus.JobSpecification + 6, // 26: odpf.optimus.CheckJobSpecificationRequest.job:type_name -> odpf.optimus.JobSpecification + 6, // 27: odpf.optimus.CheckJobSpecificationsRequest.jobs:type_name -> odpf.optimus.JobSpecification + 3, // 28: odpf.optimus.RegisterProjectRequest.project:type_name -> odpf.optimus.ProjectSpecification + 4, // 29: odpf.optimus.RegisterProjectRequest.namespace:type_name -> odpf.optimus.NamespaceSpecification + 4, // 30: odpf.optimus.RegisterProjectNamespaceRequest.namespace:type_name -> odpf.optimus.NamespaceSpecification + 6, // 31: odpf.optimus.CreateJobSpecificationRequest.spec:type_name -> odpf.optimus.JobSpecification + 6, // 32: odpf.optimus.ReadJobSpecificationResponse.spec:type_name -> odpf.optimus.JobSpecification + 3, // 33: odpf.optimus.ListProjectsResponse.projects:type_name -> odpf.optimus.ProjectSpecification + 4, // 34: odpf.optimus.ListProjectNamespacesResponse.namespaces:type_name -> odpf.optimus.NamespaceSpecification + 97, // 35: odpf.optimus.RegisterInstanceRequest.scheduled_at:type_name -> google.protobuf.Timestamp + 0, // 36: odpf.optimus.RegisterInstanceRequest.instance_type:type_name -> odpf.optimus.InstanceSpec.Type + 3, // 37: odpf.optimus.RegisterInstanceResponse.project:type_name -> odpf.optimus.ProjectSpecification + 4, // 38: odpf.optimus.RegisterInstanceResponse.namespace:type_name -> odpf.optimus.NamespaceSpecification + 6, // 39: odpf.optimus.RegisterInstanceResponse.job:type_name -> odpf.optimus.JobSpecification + 9, // 40: 
odpf.optimus.RegisterInstanceResponse.instance:type_name -> odpf.optimus.InstanceSpec + 11, // 41: odpf.optimus.RegisterInstanceResponse.context:type_name -> odpf.optimus.InstanceContext + 12, // 42: odpf.optimus.JobStatusResponse.statuses:type_name -> odpf.optimus.JobStatus + 97, // 43: odpf.optimus.GetWindowRequest.scheduled_at:type_name -> google.protobuf.Timestamp + 97, // 44: odpf.optimus.GetWindowResponse.start:type_name -> google.protobuf.Timestamp + 97, // 45: odpf.optimus.GetWindowResponse.end:type_name -> google.protobuf.Timestamp + 15, // 46: odpf.optimus.DeployResourceSpecificationRequest.resources:type_name -> odpf.optimus.ResourceSpecification + 15, // 47: odpf.optimus.ListResourceSpecificationResponse.resources:type_name -> odpf.optimus.ResourceSpecification + 15, // 48: odpf.optimus.CreateResourceRequest.resource:type_name -> odpf.optimus.ResourceSpecification + 15, // 49: odpf.optimus.ReadResourceResponse.resource:type_name -> odpf.optimus.ResourceSpecification + 15, // 50: odpf.optimus.UpdateResourceRequest.resource:type_name -> odpf.optimus.ResourceSpecification + 64, // 51: odpf.optimus.ReplayDryRunResponse.response:type_name -> odpf.optimus.ReplayExecutionTreeNode + 64, // 52: odpf.optimus.ReplayExecutionTreeNode.dependents:type_name -> odpf.optimus.ReplayExecutionTreeNode + 97, // 53: odpf.optimus.ReplayExecutionTreeNode.runs:type_name -> google.protobuf.Timestamp + 66, // 54: odpf.optimus.GetReplayStatusResponse.response:type_name -> odpf.optimus.ReplayStatusTreeNode + 66, // 55: odpf.optimus.ReplayStatusTreeNode.dependents:type_name -> odpf.optimus.ReplayStatusTreeNode + 67, // 56: odpf.optimus.ReplayStatusTreeNode.runs:type_name -> odpf.optimus.ReplayStatusRun + 97, // 57: odpf.optimus.ReplayStatusRun.run:type_name -> google.protobuf.Timestamp + 13, // 58: odpf.optimus.RegisterJobEventRequest.event:type_name -> odpf.optimus.JobEvent + 73, // 59: odpf.optimus.ListReplaysResponse.replay_list:type_name -> odpf.optimus.ReplaySpec + 97, // 60: 
odpf.optimus.ReplaySpec.start_date:type_name -> google.protobuf.Timestamp + 97, // 61: odpf.optimus.ReplaySpec.end_date:type_name -> google.protobuf.Timestamp + 97, // 62: odpf.optimus.ReplaySpec.created_at:type_name -> google.protobuf.Timestamp + 6, // 63: odpf.optimus.RunJobRequest.specifications:type_name -> odpf.optimus.JobSpecification + 96, // 64: odpf.optimus.BackupRequest.config:type_name -> odpf.optimus.BackupRequest.ConfigEntry + 82, // 65: odpf.optimus.ListBackupsResponse.backups:type_name -> odpf.optimus.BackupSpec + 97, // 66: odpf.optimus.BackupSpec.created_at:type_name -> google.protobuf.Timestamp + 89, // 67: odpf.optimus.JobSpecification.Behavior.retry:type_name -> odpf.optimus.JobSpecification.Behavior.Retry + 90, // 68: odpf.optimus.JobSpecification.Behavior.notify:type_name -> odpf.optimus.JobSpecification.Behavior.Notifiers + 99, // 69: odpf.optimus.JobSpecification.Behavior.Retry.delay:type_name -> google.protobuf.Duration + 2, // 70: odpf.optimus.JobSpecification.Behavior.Notifiers.on:type_name -> odpf.optimus.JobEvent.Type + 91, // 71: odpf.optimus.JobSpecification.Behavior.Notifiers.config:type_name -> odpf.optimus.JobSpecification.Behavior.Notifiers.ConfigEntry + 16, // 72: odpf.optimus.RuntimeService.Version:input_type -> odpf.optimus.VersionRequest + 18, // 73: odpf.optimus.RuntimeService.DeployJobSpecification:input_type -> odpf.optimus.DeployJobSpecificationRequest + 32, // 74: odpf.optimus.RuntimeService.CreateJobSpecification:input_type -> odpf.optimus.CreateJobSpecificationRequest + 34, // 75: odpf.optimus.RuntimeService.ReadJobSpecification:input_type -> odpf.optimus.ReadJobSpecificationRequest + 36, // 76: odpf.optimus.RuntimeService.DeleteJobSpecification:input_type -> odpf.optimus.DeleteJobSpecificationRequest + 20, // 77: odpf.optimus.RuntimeService.ListJobSpecification:input_type -> odpf.optimus.ListJobSpecificationRequest + 22, // 78: odpf.optimus.RuntimeService.DumpJobSpecification:input_type -> 
odpf.optimus.DumpJobSpecificationRequest + 24, // 79: odpf.optimus.RuntimeService.CheckJobSpecification:input_type -> odpf.optimus.CheckJobSpecificationRequest + 26, // 80: odpf.optimus.RuntimeService.CheckJobSpecifications:input_type -> odpf.optimus.CheckJobSpecificationsRequest + 28, // 81: odpf.optimus.RuntimeService.RegisterProject:input_type -> odpf.optimus.RegisterProjectRequest + 30, // 82: odpf.optimus.RuntimeService.RegisterProjectNamespace:input_type -> odpf.optimus.RegisterProjectNamespaceRequest + 38, // 83: odpf.optimus.RuntimeService.RegisterSecret:input_type -> odpf.optimus.RegisterSecretRequest + 40, // 84: odpf.optimus.RuntimeService.ListProjects:input_type -> odpf.optimus.ListProjectsRequest + 42, // 85: odpf.optimus.RuntimeService.ListProjectNamespaces:input_type -> odpf.optimus.ListProjectNamespacesRequest + 44, // 86: odpf.optimus.RuntimeService.RegisterInstance:input_type -> odpf.optimus.RegisterInstanceRequest + 46, // 87: odpf.optimus.RuntimeService.JobStatus:input_type -> odpf.optimus.JobStatusRequest + 69, // 88: odpf.optimus.RuntimeService.RegisterJobEvent:input_type -> odpf.optimus.RegisterJobEventRequest + 48, // 89: odpf.optimus.RuntimeService.GetWindow:input_type -> odpf.optimus.GetWindowRequest + 50, // 90: odpf.optimus.RuntimeService.DeployResourceSpecification:input_type -> odpf.optimus.DeployResourceSpecificationRequest + 52, // 91: odpf.optimus.RuntimeService.ListResourceSpecification:input_type -> odpf.optimus.ListResourceSpecificationRequest + 54, // 92: odpf.optimus.RuntimeService.CreateResource:input_type -> odpf.optimus.CreateResourceRequest + 56, // 93: odpf.optimus.RuntimeService.ReadResource:input_type -> odpf.optimus.ReadResourceRequest + 58, // 94: odpf.optimus.RuntimeService.UpdateResource:input_type -> odpf.optimus.UpdateResourceRequest + 62, // 95: odpf.optimus.RuntimeService.ReplayDryRun:input_type -> odpf.optimus.ReplayDryRunRequest + 60, // 96: odpf.optimus.RuntimeService.Replay:input_type -> 
odpf.optimus.ReplayRequest + 68, // 97: odpf.optimus.RuntimeService.GetReplayStatus:input_type -> odpf.optimus.GetReplayStatusRequest + 71, // 98: odpf.optimus.RuntimeService.ListReplays:input_type -> odpf.optimus.ListReplaysRequest + 76, // 99: odpf.optimus.RuntimeService.BackupDryRun:input_type -> odpf.optimus.BackupDryRunRequest + 78, // 100: odpf.optimus.RuntimeService.Backup:input_type -> odpf.optimus.BackupRequest + 80, // 101: odpf.optimus.RuntimeService.ListBackups:input_type -> odpf.optimus.ListBackupsRequest + 74, // 102: odpf.optimus.RuntimeService.RunJob:input_type -> odpf.optimus.RunJobRequest + 17, // 103: odpf.optimus.RuntimeService.Version:output_type -> odpf.optimus.VersionResponse + 19, // 104: odpf.optimus.RuntimeService.DeployJobSpecification:output_type -> odpf.optimus.DeployJobSpecificationResponse + 33, // 105: odpf.optimus.RuntimeService.CreateJobSpecification:output_type -> odpf.optimus.CreateJobSpecificationResponse + 35, // 106: odpf.optimus.RuntimeService.ReadJobSpecification:output_type -> odpf.optimus.ReadJobSpecificationResponse + 37, // 107: odpf.optimus.RuntimeService.DeleteJobSpecification:output_type -> odpf.optimus.DeleteJobSpecificationResponse + 21, // 108: odpf.optimus.RuntimeService.ListJobSpecification:output_type -> odpf.optimus.ListJobSpecificationResponse + 23, // 109: odpf.optimus.RuntimeService.DumpJobSpecification:output_type -> odpf.optimus.DumpJobSpecificationResponse + 25, // 110: odpf.optimus.RuntimeService.CheckJobSpecification:output_type -> odpf.optimus.CheckJobSpecificationResponse + 27, // 111: odpf.optimus.RuntimeService.CheckJobSpecifications:output_type -> odpf.optimus.CheckJobSpecificationsResponse + 29, // 112: odpf.optimus.RuntimeService.RegisterProject:output_type -> odpf.optimus.RegisterProjectResponse + 31, // 113: odpf.optimus.RuntimeService.RegisterProjectNamespace:output_type -> odpf.optimus.RegisterProjectNamespaceResponse + 39, // 114: odpf.optimus.RuntimeService.RegisterSecret:output_type -> 
odpf.optimus.RegisterSecretResponse + 41, // 115: odpf.optimus.RuntimeService.ListProjects:output_type -> odpf.optimus.ListProjectsResponse + 43, // 116: odpf.optimus.RuntimeService.ListProjectNamespaces:output_type -> odpf.optimus.ListProjectNamespacesResponse + 45, // 117: odpf.optimus.RuntimeService.RegisterInstance:output_type -> odpf.optimus.RegisterInstanceResponse + 47, // 118: odpf.optimus.RuntimeService.JobStatus:output_type -> odpf.optimus.JobStatusResponse + 70, // 119: odpf.optimus.RuntimeService.RegisterJobEvent:output_type -> odpf.optimus.RegisterJobEventResponse + 49, // 120: odpf.optimus.RuntimeService.GetWindow:output_type -> odpf.optimus.GetWindowResponse + 51, // 121: odpf.optimus.RuntimeService.DeployResourceSpecification:output_type -> odpf.optimus.DeployResourceSpecificationResponse + 53, // 122: odpf.optimus.RuntimeService.ListResourceSpecification:output_type -> odpf.optimus.ListResourceSpecificationResponse + 55, // 123: odpf.optimus.RuntimeService.CreateResource:output_type -> odpf.optimus.CreateResourceResponse + 57, // 124: odpf.optimus.RuntimeService.ReadResource:output_type -> odpf.optimus.ReadResourceResponse + 59, // 125: odpf.optimus.RuntimeService.UpdateResource:output_type -> odpf.optimus.UpdateResourceResponse + 63, // 126: odpf.optimus.RuntimeService.ReplayDryRun:output_type -> odpf.optimus.ReplayDryRunResponse + 61, // 127: odpf.optimus.RuntimeService.Replay:output_type -> odpf.optimus.ReplayResponse + 65, // 128: odpf.optimus.RuntimeService.GetReplayStatus:output_type -> odpf.optimus.GetReplayStatusResponse + 72, // 129: odpf.optimus.RuntimeService.ListReplays:output_type -> odpf.optimus.ListReplaysResponse + 77, // 130: odpf.optimus.RuntimeService.BackupDryRun:output_type -> odpf.optimus.BackupDryRunResponse + 79, // 131: odpf.optimus.RuntimeService.Backup:output_type -> odpf.optimus.BackupResponse + 81, // 132: odpf.optimus.RuntimeService.ListBackups:output_type -> odpf.optimus.ListBackupsResponse + 75, // 133: 
odpf.optimus.RuntimeService.RunJob:output_type -> odpf.optimus.RunJobResponse + 103, // [103:134] is the sub-list for method output_type + 72, // [72:103] is the sub-list for method input_type + 72, // [72:72] is the sub-list for extension type_name + 72, // [72:72] is the sub-list for extension extendee + 0, // [0:72] is the sub-list for field type_name } func init() { file_odpf_optimus_runtime_service_proto_init() } @@ -7211,7 +7614,67 @@ func file_odpf_optimus_runtime_service_proto_init() { return nil } } + file_odpf_optimus_runtime_service_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BackupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } file_odpf_optimus_runtime_service_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BackupResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_odpf_optimus_runtime_service_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListBackupsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_odpf_optimus_runtime_service_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListBackupsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_odpf_optimus_runtime_service_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BackupSpec); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_odpf_optimus_runtime_service_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} { switch 
v := v.(*ProjectSpecification_ProjectSecret); i { case 0: return &v.state @@ -7223,7 +7686,7 @@ func file_odpf_optimus_runtime_service_proto_init() { return nil } } - file_odpf_optimus_runtime_service_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} { + file_odpf_optimus_runtime_service_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*JobSpecification_Behavior); i { case 0: return &v.state @@ -7235,7 +7698,7 @@ func file_odpf_optimus_runtime_service_proto_init() { return nil } } - file_odpf_optimus_runtime_service_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} { + file_odpf_optimus_runtime_service_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*JobSpecification_Behavior_Retry); i { case 0: return &v.state @@ -7247,7 +7710,7 @@ func file_odpf_optimus_runtime_service_proto_init() { return nil } } - file_odpf_optimus_runtime_service_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} { + file_odpf_optimus_runtime_service_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*JobSpecification_Behavior_Notifiers); i { case 0: return &v.state @@ -7266,7 +7729,7 @@ func file_odpf_optimus_runtime_service_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_odpf_optimus_runtime_service_proto_rawDesc, NumEnums: 3, - NumMessages: 88, + NumMessages: 94, NumExtensions: 0, NumServices: 1, }, diff --git a/api/proto/odpf/optimus/runtime_service.pb.gw.go b/api/proto/odpf/optimus/runtime_service.pb.gw.go index cf43c44b2a..7d77acda77 100644 --- a/api/proto/odpf/optimus/runtime_service.pb.gw.go +++ b/api/proto/odpf/optimus/runtime_service.pb.gw.go @@ -1878,23 +1878,67 @@ func request_RuntimeService_BackupDryRun_0(ctx context.Context, marshaler runtim return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "datastore_name", err) } - val, ok = 
pathParams["resource_name"] + msg, err := client.BackupDryRun(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_RuntimeService_BackupDryRun_0(ctx context.Context, marshaler runtime.Marshaler, server RuntimeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BackupDryRunRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["project_name"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "resource_name") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "project_name") } - protoReq.ResourceName, err = runtime.String(val) + protoReq.ProjectName, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "resource_name", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "project_name", err) } - msg, err := client.BackupDryRun(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["datastore_name"] + if 
!ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "datastore_name") + } + + protoReq.DatastoreName, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "datastore_name", err) + } + + msg, err := server.BackupDryRun(ctx, &protoReq) return msg, metadata, err } -func local_request_RuntimeService_BackupDryRun_0(ctx context.Context, marshaler runtime.Marshaler, server RuntimeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq BackupDryRunRequest +func request_RuntimeService_Backup_0(ctx context.Context, marshaler runtime.Marshaler, client RuntimeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BackupRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) @@ -1942,17 +1986,153 @@ func local_request_RuntimeService_BackupDryRun_0(ctx context.Context, marshaler return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "datastore_name", err) } - val, ok = pathParams["resource_name"] + msg, err := client.Backup(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_RuntimeService_Backup_0(ctx context.Context, marshaler runtime.Marshaler, server RuntimeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BackupRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["project_name"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "resource_name") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "project_name") } - protoReq.ResourceName, err = runtime.String(val) + protoReq.ProjectName, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "resource_name", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "project_name", err) } - msg, err := server.BackupDryRun(ctx, &protoReq) + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["datastore_name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "datastore_name") + } + + protoReq.DatastoreName, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "datastore_name", err) + } + + msg, err := server.Backup(ctx, &protoReq) + return msg, metadata, err + +} + +func request_RuntimeService_ListBackups_0(ctx context.Context, marshaler runtime.Marshaler, client RuntimeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListBackupsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["project_name"] + if !ok { + return nil, 
metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "project_name") + } + + protoReq.ProjectName, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "project_name", err) + } + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["datastore_name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "datastore_name") + } + + protoReq.DatastoreName, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "datastore_name", err) + } + + msg, err := client.ListBackups(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_RuntimeService_ListBackups_0(ctx context.Context, marshaler runtime.Marshaler, server RuntimeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListBackupsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["project_name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "project_name") + } + + protoReq.ProjectName, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "project_name", err) + } + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", 
"namespace") + } + + protoReq.Namespace, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["datastore_name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "datastore_name") + } + + protoReq.DatastoreName, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "datastore_name", err) + } + + msg, err := server.ListBackups(ctx, &protoReq) return msg, metadata, err } @@ -2609,7 +2789,7 @@ func RegisterRuntimeServiceHandlerServer(ctx context.Context, mux *runtime.Serve var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/odpf.optimus.RuntimeService/BackupDryRun", runtime.WithHTTPPathPattern("/v1/project/{project_name}/namespace/{namespace}/datastore/{datastore_name}/resource/{resource_name}/backup/dryrun")) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/odpf.optimus.RuntimeService/BackupDryRun", runtime.WithHTTPPathPattern("/v1/project/{project_name}/namespace/{namespace}/datastore/{datastore_name}/backup-dryrun")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -2626,6 +2806,52 @@ func RegisterRuntimeServiceHandlerServer(ctx context.Context, mux *runtime.Serve }) + mux.Handle("POST", pattern_RuntimeService_Backup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + 
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/odpf.optimus.RuntimeService/Backup", runtime.WithHTTPPathPattern("/v1/project/{project_name}/namespace/{namespace}/datastore/{datastore_name}/backup")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_RuntimeService_Backup_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_RuntimeService_Backup_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_RuntimeService_ListBackups_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/odpf.optimus.RuntimeService/ListBackups", runtime.WithHTTPPathPattern("/v1/project/{project_name}/namespace/{namespace}/datastore/{datastore_name}/backup")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_RuntimeService_ListBackups_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_RuntimeService_ListBackups_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + mux.Handle("POST", pattern_RuntimeService_RunJob_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -3174,7 +3400,7 @@ func RegisterRuntimeServiceHandlerClient(ctx context.Context, mux *runtime.Serve ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req, "/odpf.optimus.RuntimeService/BackupDryRun", runtime.WithHTTPPathPattern("/v1/project/{project_name}/namespace/{namespace}/datastore/{datastore_name}/resource/{resource_name}/backup/dryrun")) + rctx, err := runtime.AnnotateContext(ctx, mux, req, "/odpf.optimus.RuntimeService/BackupDryRun", runtime.WithHTTPPathPattern("/v1/project/{project_name}/namespace/{namespace}/datastore/{datastore_name}/backup-dryrun")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -3190,6 +3416,46 @@ func RegisterRuntimeServiceHandlerClient(ctx context.Context, mux *runtime.Serve }) + mux.Handle("POST", pattern_RuntimeService_Backup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req, "/odpf.optimus.RuntimeService/Backup", runtime.WithHTTPPathPattern("/v1/project/{project_name}/namespace/{namespace}/datastore/{datastore_name}/backup")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_RuntimeService_Backup_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_RuntimeService_Backup_0(ctx, mux, outboundMarshaler, w, 
req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_RuntimeService_ListBackups_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req, "/odpf.optimus.RuntimeService/ListBackups", runtime.WithHTTPPathPattern("/v1/project/{project_name}/namespace/{namespace}/datastore/{datastore_name}/backup")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_RuntimeService_ListBackups_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_RuntimeService_ListBackups_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + mux.Handle("POST", pattern_RuntimeService_RunJob_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -3262,7 +3528,11 @@ var ( pattern_RuntimeService_ListReplays_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3}, []string{"v1", "project", "project_name", "replay"}, "")) - pattern_RuntimeService_BackupDryRun_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7, 2, 8, 2, 9}, []string{"v1", "project", "project_name", "namespace", "datastore", "datastore_name", "resource", "resource_name", "backup", "dryrun"}, "")) + pattern_RuntimeService_BackupDryRun_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6}, []string{"v1", "project", "project_name", "namespace", "datastore", "datastore_name", "backup-dryrun"}, "")) + + pattern_RuntimeService_Backup_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6}, []string{"v1", "project", "project_name", "namespace", "datastore", "datastore_name", "backup"}, "")) + + pattern_RuntimeService_ListBackups_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6}, []string{"v1", "project", "project_name", "namespace", "datastore", "datastore_name", "backup"}, "")) pattern_RuntimeService_RunJob_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"v1", "project", "project_name", "namespace", "run"}, "")) ) @@ -3318,5 +3588,9 @@ var ( forward_RuntimeService_BackupDryRun_0 = runtime.ForwardResponseMessage + forward_RuntimeService_Backup_0 = runtime.ForwardResponseMessage + + 
forward_RuntimeService_ListBackups_0 = runtime.ForwardResponseMessage + forward_RuntimeService_RunJob_0 = runtime.ForwardResponseMessage ) diff --git a/api/proto/odpf/optimus/runtime_service_grpc.pb.go b/api/proto/odpf/optimus/runtime_service_grpc.pb.go index b123781c1c..4691dcf2a1 100644 --- a/api/proto/odpf/optimus/runtime_service_grpc.pb.go +++ b/api/proto/odpf/optimus/runtime_service_grpc.pb.go @@ -78,6 +78,8 @@ type RuntimeServiceClient interface { GetReplayStatus(ctx context.Context, in *GetReplayStatusRequest, opts ...grpc.CallOption) (*GetReplayStatusResponse, error) ListReplays(ctx context.Context, in *ListReplaysRequest, opts ...grpc.CallOption) (*ListReplaysResponse, error) BackupDryRun(ctx context.Context, in *BackupDryRunRequest, opts ...grpc.CallOption) (*BackupDryRunResponse, error) + Backup(ctx context.Context, in *BackupRequest, opts ...grpc.CallOption) (*BackupResponse, error) + ListBackups(ctx context.Context, in *ListBackupsRequest, opts ...grpc.CallOption) (*ListBackupsResponse, error) // RunJob creates a job run and executes all included tasks/hooks instantly // this doesn't necessarily deploy the job in db first RunJob(ctx context.Context, in *RunJobRequest, opts ...grpc.CallOption) (*RunJobResponse, error) @@ -412,6 +414,24 @@ func (c *runtimeServiceClient) BackupDryRun(ctx context.Context, in *BackupDryRu return out, nil } +func (c *runtimeServiceClient) Backup(ctx context.Context, in *BackupRequest, opts ...grpc.CallOption) (*BackupResponse, error) { + out := new(BackupResponse) + err := c.cc.Invoke(ctx, "/odpf.optimus.RuntimeService/Backup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) ListBackups(ctx context.Context, in *ListBackupsRequest, opts ...grpc.CallOption) (*ListBackupsResponse, error) { + out := new(ListBackupsResponse) + err := c.cc.Invoke(ctx, "/odpf.optimus.RuntimeService/ListBackups", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + func (c *runtimeServiceClient) RunJob(ctx context.Context, in *RunJobRequest, opts ...grpc.CallOption) (*RunJobResponse, error) { out := new(RunJobResponse) err := c.cc.Invoke(ctx, "/odpf.optimus.RuntimeService/RunJob", in, out, opts...) @@ -485,6 +505,8 @@ type RuntimeServiceServer interface { GetReplayStatus(context.Context, *GetReplayStatusRequest) (*GetReplayStatusResponse, error) ListReplays(context.Context, *ListReplaysRequest) (*ListReplaysResponse, error) BackupDryRun(context.Context, *BackupDryRunRequest) (*BackupDryRunResponse, error) + Backup(context.Context, *BackupRequest) (*BackupResponse, error) + ListBackups(context.Context, *ListBackupsRequest) (*ListBackupsResponse, error) // RunJob creates a job run and executes all included tasks/hooks instantly // this doesn't necessarily deploy the job in db first RunJob(context.Context, *RunJobRequest) (*RunJobResponse, error) @@ -579,6 +601,12 @@ func (UnimplementedRuntimeServiceServer) ListReplays(context.Context, *ListRepla func (UnimplementedRuntimeServiceServer) BackupDryRun(context.Context, *BackupDryRunRequest) (*BackupDryRunResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method BackupDryRun not implemented") } +func (UnimplementedRuntimeServiceServer) Backup(context.Context, *BackupRequest) (*BackupResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Backup not implemented") +} +func (UnimplementedRuntimeServiceServer) ListBackups(context.Context, *ListBackupsRequest) (*ListBackupsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListBackups not implemented") +} func (UnimplementedRuntimeServiceServer) RunJob(context.Context, *RunJobRequest) (*RunJobResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RunJob not implemented") } @@ -1108,6 +1136,42 @@ func _RuntimeService_BackupDryRun_Handler(srv interface{}, ctx context.Context, return interceptor(ctx, 
in, info, handler) } +func _RuntimeService_Backup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BackupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).Backup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/odpf.optimus.RuntimeService/Backup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).Backup(ctx, req.(*BackupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_ListBackups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListBackupsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).ListBackups(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/odpf.optimus.RuntimeService/ListBackups", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).ListBackups(ctx, req.(*ListBackupsRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _RuntimeService_RunJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RunJobRequest) if err := dec(in); err != nil { @@ -1233,6 +1297,14 @@ var RuntimeService_ServiceDesc = grpc.ServiceDesc{ MethodName: "BackupDryRun", Handler: _RuntimeService_BackupDryRun_Handler, }, + { + MethodName: "Backup", + Handler: _RuntimeService_Backup_Handler, + }, + { + MethodName: "ListBackups", + Handler: _RuntimeService_ListBackups_Handler, + }, { MethodName: "RunJob", Handler: _RuntimeService_RunJob_Handler, diff --git 
a/api/third_party/openapi/odpf/optimus/runtime_service.swagger.json b/api/third_party/openapi/odpf/optimus/runtime_service.swagger.json index 09ac8bacb5..623ab3fa91 100644 --- a/api/third_party/openapi/odpf/optimus/runtime_service.swagger.json +++ b/api/third_party/openapi/odpf/optimus/runtime_service.swagger.json @@ -357,15 +357,14 @@ ] } }, - "/v1/project/{projectName}/namespace/{namespace}/datastore/{datastoreName}/resource": { + "/v1/project/{projectName}/namespace/{namespace}/datastore/{datastoreName}/backup": { "get": { - "summary": "ListResourceSpecification lists all resource specifications of a datastore in project", - "operationId": "RuntimeService_ListResourceSpecification", + "operationId": "RuntimeService_ListBackups", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/optimusListResourceSpecificationResponse" + "$ref": "#/definitions/optimusListBackupsResponse" } }, "default": { @@ -400,13 +399,12 @@ ] }, "post": { - "summary": "Database CRUD\nCreateResource registers a new resource of a namespace which belongs to a project", - "operationId": "RuntimeService_CreateResource", + "operationId": "RuntimeService_Backup", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/optimusCreateResourceResponse" + "$ref": "#/definitions/optimusBackupResponse" } }, "default": { @@ -442,8 +440,20 @@ "schema": { "type": "object", "properties": { - "resource": { - "$ref": "#/definitions/optimusResourceSpecification" + "resourceName": { + "type": "string" + }, + "description": { + "type": "string" + }, + "ignoreDownstream": { + "type": "boolean" + }, + "config": { + "type": "object", + "additionalProperties": { + "type": "string" + } } } } @@ -452,15 +462,16 @@ "tags": [ "RuntimeService" ] - }, - "put": { - "summary": "UpdateResource updates a resource specification of a datastore in project", - "operationId": "RuntimeService_UpdateResource", + } + }, + 
"/v1/project/{projectName}/namespace/{namespace}/datastore/{datastoreName}/backup-dryrun": { + "post": { + "operationId": "RuntimeService_BackupDryRun", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/optimusUpdateResourceResponse" + "$ref": "#/definitions/optimusBackupDryRunResponse" } }, "default": { @@ -496,8 +507,14 @@ "schema": { "type": "object", "properties": { - "resource": { - "$ref": "#/definitions/optimusResourceSpecification" + "resourceName": { + "type": "string" + }, + "description": { + "type": "string" + }, + "ignoreDownstream": { + "type": "boolean" } } } @@ -508,15 +525,15 @@ ] } }, - "/v1/project/{projectName}/namespace/{namespace}/datastore/{datastoreName}/resource/{resourceName}": { + "/v1/project/{projectName}/namespace/{namespace}/datastore/{datastoreName}/resource": { "get": { - "summary": "ReadResource reads a provided resource spec of a namespace", - "operationId": "RuntimeService_ReadResource", + "summary": "ListResourceSpecification lists all resource specifications of a datastore in project", + "operationId": "RuntimeService_ListResourceSpecification", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/optimusReadResourceResponse" + "$ref": "#/definitions/optimusListResourceSpecificationResponse" } }, "default": { @@ -544,27 +561,20 @@ "in": "path", "required": true, "type": "string" - }, - { - "name": "resourceName", - "in": "path", - "required": true, - "type": "string" } ], "tags": [ "RuntimeService" ] - } - }, - "/v1/project/{projectName}/namespace/{namespace}/datastore/{datastoreName}/resource/{resourceName}/backup/dryrun": { + }, "post": { - "operationId": "RuntimeService_BackupDryRun", + "summary": "Database CRUD\nCreateResource registers a new resource of a namespace which belongs to a project", + "operationId": "RuntimeService_CreateResource", "responses": { "200": { "description": "A successful response.", "schema": { - 
"$ref": "#/definitions/optimusBackupDryRunResponse" + "$ref": "#/definitions/optimusCreateResourceResponse" } }, "default": { @@ -594,7 +604,55 @@ "type": "string" }, { - "name": "resourceName", + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "object", + "properties": { + "resource": { + "$ref": "#/definitions/optimusResourceSpecification" + } + } + } + } + ], + "tags": [ + "RuntimeService" + ] + }, + "put": { + "summary": "UpdateResource updates a resource specification of a datastore in project", + "operationId": "RuntimeService_UpdateResource", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/optimusUpdateResourceResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "projectName", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "namespace", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "datastoreName", "in": "path", "required": true, "type": "string" @@ -606,11 +664,8 @@ "schema": { "type": "object", "properties": { - "description": { - "type": "string" - }, - "ignoreDownstream": { - "type": "boolean" + "resource": { + "$ref": "#/definitions/optimusResourceSpecification" } } } @@ -621,6 +676,55 @@ ] } }, + "/v1/project/{projectName}/namespace/{namespace}/datastore/{datastoreName}/resource/{resourceName}": { + "get": { + "summary": "ReadResource reads a provided resource spec of a namespace", + "operationId": "RuntimeService_ReadResource", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/optimusReadResourceResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "projectName", + "in": "path", + "required": true, + 
"type": "string" + }, + { + "name": "namespace", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "datastoreName", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "resourceName", + "in": "path", + "required": true, + "type": "string" + } + ], + "tags": [ + "RuntimeService" + ] + } + }, "/v1/project/{projectName}/namespace/{namespace}/job": { "post": { "summary": "CreateJobSpecification registers a new job for a namespace which belongs to a project", @@ -1250,6 +1354,35 @@ } } }, + "optimusBackupResponse": { + "type": "object", + "properties": { + "urn": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "optimusBackupSpec": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "resourceName": { + "type": "string" + }, + "createdAt": { + "type": "string", + "format": "date-time" + }, + "description": { + "type": "string" + } + } + }, "optimusCheckJobSpecificationResponse": { "type": "object", "properties": { @@ -1612,6 +1745,17 @@ } } }, + "optimusListBackupsResponse": { + "type": "object", + "properties": { + "backups": { + "type": "array", + "items": { + "$ref": "#/definitions/optimusBackupSpec" + } + } + } + }, "optimusListJobSpecificationResponse": { "type": "object", "properties": { diff --git a/cmd/backup.go b/cmd/backup.go index 75327cab08..6beac8a8cd 100644 --- a/cmd/backup.go +++ b/cmd/backup.go @@ -3,6 +3,9 @@ package cmd import ( "context" "fmt" + "time" + + "github.com/olekukonko/tablewriter" "github.com/AlecAivazis/survey/v2" pb "github.com/odpf/optimus/api/proto/odpf/optimus" @@ -14,6 +17,10 @@ import ( "google.golang.org/grpc" ) +var ( + backupTimeout = time.Minute * 1 +) + func backupCommand(l log.Logger, datastoreRepo models.DatastoreRepo, conf config.Provider) *cli.Command { cmd := &cli.Command{ Use: "backup", @@ -21,6 +28,7 @@ func backupCommand(l log.Logger, datastoreRepo models.DatastoreRepo, conf config Long: "Backup supported resource of a datastore 
and all of its downstream dependencies", } cmd.AddCommand(backupResourceSubCommand(l, datastoreRepo, conf)) + cmd.AddCommand(backupListSubCommand(l, datastoreRepo, conf)) return cmd } @@ -49,7 +57,7 @@ func backupResourceSubCommand(l log.Logger, datastoreRepo models.DatastoreRepo, } var storerName string if err := survey.AskOne(&survey.Select{ - Message: "Select supported datastores?", + Message: "Select supported datastore?", Options: availableStorer, }, &storerName); err != nil { return err @@ -88,7 +96,8 @@ func backupResourceSubCommand(l log.Logger, datastoreRepo models.DatastoreRepo, resourceName := inputs["name"].(string) description := inputs["description"].(string) backupDownstream := inputs["backupDownstream"].(bool) - backupRequest := &pb.BackupDryRunRequest{ + + backupDryRunRequest := &pb.BackupDryRunRequest{ ProjectName: project, Namespace: namespace, ResourceName: resourceName, @@ -97,7 +106,46 @@ func backupResourceSubCommand(l log.Logger, datastoreRepo models.DatastoreRepo, IgnoreDownstream: !backupDownstream, } - return runBackupDryRunRequest(l, conf, backupRequest) + if err := runBackupDryRunRequest(l, conf, backupDryRunRequest); err != nil { + l.Info("unable to run backup dry run") + return err + } + + if dryRun { + //if only dry run, exit now + return nil + } + + proceedWithBackup := "Yes" + if err := survey.AskOne(&survey.Select{ + Message: "Proceed the backup?", + Options: []string{"Yes", "No"}, + Default: "Yes", + }, &proceedWithBackup); err != nil { + return err + } + + if proceedWithBackup == "No" { + l.Info("aborting...") + return nil + } + + backupRequest := &pb.BackupRequest{ + ProjectName: project, + Namespace: namespace, + ResourceName: resourceName, + DatastoreName: storerName, + Description: description, + IgnoreDownstream: !backupDownstream, + } + + for _, ds := range conf.GetDatastore() { + if ds.Type == storerName { + backupRequest.Config = ds.Backup + } + } + + return runBackupRequest(l, conf, backupRequest) } return backupCmd } 
@@ -115,7 +163,7 @@ func runBackupDryRunRequest(l log.Logger, conf config.Provider, backupRequest *p } defer conn.Close() - requestTimeoutCtx, requestCancel := context.WithTimeout(context.Background(), replayTimeout) + requestTimeoutCtx, requestCancel := context.WithTimeout(context.Background(), backupTimeout) defer requestCancel() l.Info("please wait...") @@ -133,6 +181,47 @@ func runBackupDryRunRequest(l log.Logger, conf config.Provider, backupRequest *p return nil } +func runBackupRequest(l log.Logger, conf config.Provider, backupRequest *pb.BackupRequest) (err error) { + dialTimeoutCtx, dialCancel := context.WithTimeout(context.Background(), OptimusDialTimeout) + defer dialCancel() + + var conn *grpc.ClientConn + if conn, err = createConnection(dialTimeoutCtx, conf.GetHost()); err != nil { + if errors.Is(err, context.DeadlineExceeded) { + l.Info("can't reach optimus service") + } + return err + } + defer conn.Close() + + requestTimeout, requestCancel := context.WithTimeout(context.Background(), backupTimeout) + defer requestCancel() + + l.Info("please wait...") + runtime := pb.NewRuntimeServiceClient(conn) + + backupResponse, err := runtime.Backup(requestTimeout, backupRequest) + if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + l.Info("backup took too long, timing out") + } + return errors.Wrapf(err, "request failed to backup job %s", backupRequest.ResourceName) + } + + printBackupResponse(l, backupResponse) + return nil +} + +func printBackupResponse(l log.Logger, backupResponse *pb.BackupResponse) { + l.Info(coloredSuccess("\nBackup Finished")) + l.Info("Resource backup completed successfully:") + counter := 1 + for _, result := range backupResponse.Urn { + l.Info(fmt.Sprintf("%d. %s", counter, result)) + counter++ + } +} + func printBackupDryRunResponse(l log.Logger, backupRequest *pb.BackupDryRunRequest, backupResponse *pb.BackupDryRunResponse) { if backupRequest.IgnoreDownstream { l.Info(coloredPrint(fmt.Sprintf("Backup list for %s. 
Downstreams will be ignored.", backupRequest.ResourceName))) @@ -145,3 +234,89 @@ func printBackupDryRunResponse(l log.Logger, backupRequest *pb.BackupDryRunReque counter++ } } + +func backupListSubCommand(l log.Logger, datastoreRepo models.DatastoreRepo, conf config.Provider) *cli.Command { + backupCmd := &cli.Command{ + Use: "list", + Short: "get list of backup per project and datastore", + } + + var ( + project string + ) + + backupCmd.Flags().StringVarP(&project, "project", "p", "", "project name of optimus managed repository") + backupCmd.MarkFlagRequired("project") + + backupCmd.RunE = func(cmd *cli.Command, args []string) error { + availableStorer := []string{} + for _, s := range datastoreRepo.GetAll() { + availableStorer = append(availableStorer, s.Name()) + } + var storerName string + if err := survey.AskOne(&survey.Select{ + Message: "Select supported datastore?", + Options: availableStorer, + }, &storerName); err != nil { + return err + } + + listBackupsRequest := &pb.ListBackupsRequest{ + ProjectName: project, + DatastoreName: storerName, + } + + dialTimeoutCtx, dialCancel := context.WithTimeout(context.Background(), OptimusDialTimeout) + defer dialCancel() + + conn, err := createConnection(dialTimeoutCtx, conf.GetHost()) + if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + l.Info("can't reach optimus service") + } + return err + } + defer conn.Close() + + requestTimeout, requestCancel := context.WithTimeout(context.Background(), backupTimeout) + defer requestCancel() + + l.Info("please wait...") + runtime := pb.NewRuntimeServiceClient(conn) + + listBackupsResponse, err := runtime.ListBackups(requestTimeout, listBackupsRequest) + if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + l.Info("getting list of backups took too long, timing out") + } + return errors.Wrapf(err, "request failed to get list backups") + } + + if len(listBackupsResponse.Backups) == 0 { + l.Info(fmt.Sprintf("no backups were found in %s project.", 
project)) + } else { + printBackupListResponse(l, listBackupsResponse) + } + return nil + } + return backupCmd +} + +func printBackupListResponse(l log.Logger, listBackupsResponse *pb.ListBackupsResponse) { + l.Info(coloredNotice("Latest Backups")) + table := tablewriter.NewWriter(l.Writer()) + table.SetBorder(false) + table.SetHeader([]string{ + "ID", + "Resource", + "Created", + "Description", + }) + + for _, backupSpec := range listBackupsResponse.Backups { + table.Append([]string{backupSpec.Id, backupSpec.ResourceName, backupSpec.CreatedAt.AsTime().Format(time.RFC3339), + backupSpec.Description}) + } + + table.Render() +} diff --git a/cmd/server/server.go b/cmd/server/server.go index 7c16f18b1c..c337650aeb 100644 --- a/cmd/server/server.go +++ b/cmd/server/server.go @@ -169,6 +169,15 @@ func (fac *resourceSpecRepoFactory) New(namespace models.NamespaceSpec, ds model return postgres.NewResourceSpecRepository(fac.db, namespace, ds, fac.projectResourceSpecRepoFac.New(namespace.ProjectSpec, ds)) } +// backupRepoFactory stores backup specifications +type backupRepoFactory struct { + db *gorm.DB +} + +func (fac *backupRepoFactory) New(projectSpec models.ProjectSpec, storer models.Datastorer) store.BackupRepository { + return postgres.NewBackupRepository(fac.db, projectSpec, storer) +} + type airflowBucketFactory struct{} func (o *airflowBucketFactory) New(ctx context.Context, projectSpec models.ProjectSpec) (airflow2.Bucket, error) { @@ -435,6 +444,9 @@ func Initialize(l log.Logger, conf config.Provider) error { WorkerTimeout: conf.GetServe().ReplayWorkerTimeoutSecs, RunTimeout: conf.GetServe().ReplayRunTimeoutSecs, }, models.BatchScheduler, replayValidator, replaySyncer) + backupRepoFac := backupRepoFactory{ + db: dbConn, + } notificationContext, cancelNotifiers := context.WithCancel(context.Background()) defer cancelNotifiers() @@ -463,7 +475,7 @@ func Initialize(l log.Logger, conf config.Provider) error { replayManager, ), eventService, - 
datastore.NewService(&resourceSpecRepoFac, models.DatastoreRegistry), + datastore.NewService(&resourceSpecRepoFac, models.DatastoreRegistry, utils.NewUUIDProvider(), &backupRepoFac), projectRepoFac, namespaceSpecRepoFac, projectSecretRepoFac, diff --git a/config/config.go b/config/config.go index 29d98051fc..4ab878c838 100644 --- a/config/config.go +++ b/config/config.go @@ -1,6 +1,7 @@ package config import ( + "encoding/json" "strings" "time" @@ -71,6 +72,9 @@ type Datastore struct { // directory to find specifications Path string `yaml:"path" koanf:"path"` + + // backup configuration + Backup map[string]string `yaml:"backup" koanf:"backup"` } type Job struct { @@ -177,7 +181,13 @@ func (o *Optimus) GetJob() Job { func (o *Optimus) GetDatastore() []Datastore { ds := []Datastore{} - _ = o.k.Unmarshal("datastore", &ds) + if o.k.Get("datastore") != nil { + err := o.k.Unmarshal("datastore", &ds) + if err != nil { + // env var loaded config is in string + json.Unmarshal(o.k.Bytes("datastore"), &ds) + } + } return ds } diff --git a/datastore/service.go b/datastore/service.go index 01ddabcded..4e36bb708c 100644 --- a/datastore/service.go +++ b/datastore/service.go @@ -3,6 +3,9 @@ package datastore import ( "context" "fmt" + "time" + + "github.com/odpf/optimus/utils" "github.com/hashicorp/go-multierror" "github.com/kushsharma/parallel" @@ -14,6 +17,9 @@ import ( const ( ConcurrentTicketPerSec = 5 ConcurrentLimit = 20 + + //backupListWindow window interval to fetch recent backups + backupListWindow = -3 * 30 * 24 * time.Hour ) type ResourceSpecRepoFactory interface { @@ -24,9 +30,15 @@ type ProjectResourceSpecRepoFactory interface { New(spec models.ProjectSpec, storer models.Datastorer) store.ProjectResourceSpecRepository } +type BackupRepoFactory interface { + New(spec models.ProjectSpec, storer models.Datastorer) store.BackupRepository +} + type Service struct { resourceRepoFactory ResourceSpecRepoFactory dsRepo models.DatastoreRepo + backupRepoFactory BackupRepoFactory 
+ uuidProvider utils.UUIDProvider } func (srv Service) GetAll(namespace models.NamespaceSpec, datastoreName string) ([]models.ResourceSpec, error) { @@ -142,24 +154,32 @@ func (srv Service) DeleteResource(ctx context.Context, namespace models.Namespac return repo.Delete(name) } -func (srv Service) BackupResourceDryRun(ctx context.Context, projectSpec models.ProjectSpec, namespaceSpec models.NamespaceSpec, jobSpecs []models.JobSpec) ([]string, error) { +func generateResourceDestination(ctx context.Context, jobSpec models.JobSpec) (*models.GenerateDestinationResponse, error) { + return jobSpec.Task.Unit.DependencyMod.GenerateDestination(ctx, models.GenerateDestinationRequest{ + Config: models.PluginConfigs{}.FromJobSpec(jobSpec.Task.Config), + Assets: models.PluginAssets{}.FromJobSpec(jobSpec.Assets), + }) +} + +func (srv Service) getResourceSpec(datastorer models.Datastorer, namespace models.NamespaceSpec, destinationURN string) (models.ResourceSpec, error) { + repo := srv.resourceRepoFactory.New(namespace, datastorer) + return repo.GetByURN(destinationURN) +} + +func (srv Service) BackupResourceDryRun(ctx context.Context, backupRequest models.BackupRequest, jobSpecs []models.JobSpec) ([]string, error) { var resourcesToBackup []string for _, jobSpec := range jobSpecs { - destination, err := jobSpec.Task.Unit.DependencyMod.GenerateDestination(ctx, models.GenerateDestinationRequest{ - Config: models.PluginConfigs{}.FromJobSpec(jobSpec.Task.Config), - Assets: models.PluginAssets{}.FromJobSpec(jobSpec.Assets), - }) + destination, err := generateResourceDestination(ctx, jobSpec) if err != nil { return nil, err } - ds, err := srv.dsRepo.GetByName(destination.Type.String()) + datastorer, err := srv.dsRepo.GetByName(destination.Type.String()) if err != nil { return nil, err } - repo := srv.resourceRepoFactory.New(namespaceSpec, ds) - resourceSpec, err := repo.GetByURN(destination.URN()) + resourceSpec, err := srv.getResourceSpec(datastorer, backupRequest.Namespace, 
destination.URN()) if err != nil { if err == store.ErrResourceNotFound { continue @@ -167,22 +187,121 @@ func (srv Service) BackupResourceDryRun(ctx context.Context, projectSpec models. return nil, err } - backupReq := models.BackupResourceRequest{ - Resource: resourceSpec, - Project: projectSpec, - DryRun: true, - } - if err := ds.BackupResource(ctx, backupReq); err != nil { + //do backup in storer + _, err = datastorer.BackupResource(ctx, models.BackupResourceRequest{ + Resource: resourceSpec, + BackupSpec: backupRequest, + }) + if err != nil { if err == models.ErrUnsupportedResource { continue } return nil, err } + resourcesToBackup = append(resourcesToBackup, destination.Destination) } return resourcesToBackup, nil } +func (srv Service) BackupResource(ctx context.Context, backupRequest models.BackupRequest, jobSpecs []models.JobSpec) ([]string, error) { + backupSpec, err := srv.prepareBackupSpec(backupRequest) + if err != nil { + return nil, err + } + backupRequest.ID = backupSpec.ID + + var backupResult []string + for _, jobSpec := range jobSpecs { + destination, err := generateResourceDestination(ctx, jobSpec) + if err != nil { + return nil, err + } + + datastorer, err := srv.dsRepo.GetByName(destination.Type.String()) + if err != nil { + return nil, err + } + + resourceSpec, err := srv.getResourceSpec(datastorer, backupRequest.Namespace, destination.URN()) + if err != nil { + if err == store.ErrResourceNotFound { + continue + } + return nil, err + } + + //do backup in storer + backupResp, err := datastorer.BackupResource(ctx, models.BackupResourceRequest{ + Resource: resourceSpec, + BackupSpec: backupRequest, + BackupTime: time.Now(), + }) + if err != nil { + if err == models.ErrUnsupportedResource { + continue + } + return nil, err + } + // form slices of result urn to return + backupResult = append(backupResult, backupResp.ResultURN) + // enrich backup spec with result detail to be saved + backupSpec.Result[destination.Destination] = models.BackupResult{ + 
URN: backupResp.ResultURN, + Spec: backupResp.ResultSpec, + } + // enrich backup spec with resource detail to be saved + if resourceSpec.Name == backupRequest.ResourceName { + backupSpec.Resource = resourceSpec + } + } + + //save the backup + backupRepo := srv.backupRepoFactory.New(backupRequest.Project, backupSpec.Resource.Datastore) + if err := backupRepo.Save(backupSpec); err != nil { + return nil, err + } + + return backupResult, nil +} + +func (srv Service) ListBackupResources(projectSpec models.ProjectSpec, datastoreName string) ([]models.BackupSpec, error) { + datastorer, err := srv.dsRepo.GetByName(datastoreName) + if err != nil { + return []models.BackupSpec{}, err + } + + backupRepo := srv.backupRepoFactory.New(projectSpec, datastorer) + backupSpecs, err := backupRepo.GetAll() + if err != nil { + if err == store.ErrResourceNotFound { + return []models.BackupSpec{}, nil + } + return []models.BackupSpec{}, err + } + + var recentBackups []models.BackupSpec + for _, backup := range backupSpecs { + if backup.CreatedAt.After(time.Now().UTC().Add(backupListWindow)) { + recentBackups = append(recentBackups, backup) + } + } + return recentBackups, nil +} + +func (srv Service) prepareBackupSpec(backupRequest models.BackupRequest) (models.BackupSpec, error) { + backupID, err := srv.uuidProvider.NewUUID() + if err != nil { + return models.BackupSpec{}, err + } + return models.BackupSpec{ + ID: backupID, + Description: backupRequest.Description, + Config: backupRequest.Config, + Result: make(map[string]interface{}), + }, nil +} + func (srv *Service) notifyProgress(po progress.Observer, event progress.Event) { if po == nil { return @@ -190,10 +309,13 @@ func (srv *Service) notifyProgress(po progress.Observer, event progress.Event) { po.Notify(event) } -func NewService(resourceRepoFactory ResourceSpecRepoFactory, dsRepo models.DatastoreRepo) *Service { +func NewService(resourceRepoFactory ResourceSpecRepoFactory, dsRepo models.DatastoreRepo, uuidProvider 
utils.UUIDProvider, + backupRepoFactory BackupRepoFactory) *Service { return &Service{ resourceRepoFactory: resourceRepoFactory, dsRepo: dsRepo, + backupRepoFactory: backupRepoFactory, + uuidProvider: uuidProvider, } } diff --git a/datastore/service_test.go b/datastore/service_test.go index e69a19d510..e46be09f4c 100644 --- a/datastore/service_test.go +++ b/datastore/service_test.go @@ -60,7 +60,7 @@ func TestService(t *testing.T) { projectResourceRepoFac := new(mock.ProjectResourceSpecRepoFactory) defer projectResourceRepoFac.AssertExpectations(t) - service := datastore.NewService(resourceRepoFac, dsRepo) + service := datastore.NewService(resourceRepoFac, dsRepo, nil, nil) res, err := service.GetAll(namespaceSpec, "bq") assert.Nil(t, err) assert.Equal(t, []models.ResourceSpec{resourceSpec1}, res) @@ -108,7 +108,7 @@ func TestService(t *testing.T) { projectResourceRepoFac := new(mock.ProjectResourceSpecRepoFactory) defer projectResourceRepoFac.AssertExpectations(t) - service := datastore.NewService(resourceRepoFac, dsRepo) + service := datastore.NewService(resourceRepoFac, dsRepo, nil, nil) err := service.CreateResource(context.TODO(), namespaceSpec, []models.ResourceSpec{resourceSpec1, resourceSpec2}, nil) assert.Nil(t, err) }) @@ -148,7 +148,7 @@ func TestService(t *testing.T) { projectResourceRepoFac := new(mock.ProjectResourceSpecRepoFactory) defer projectResourceRepoFac.AssertExpectations(t) - service := datastore.NewService(resourceRepoFac, dsRepo) + service := datastore.NewService(resourceRepoFac, dsRepo, nil, nil) err := service.CreateResource(context.TODO(), namespaceSpec, []models.ResourceSpec{resourceSpec1, resourceSpec2}, nil) assert.NotNil(t, err) }) @@ -194,7 +194,7 @@ func TestService(t *testing.T) { projectResourceRepoFac := new(mock.ProjectResourceSpecRepoFactory) defer projectResourceRepoFac.AssertExpectations(t) - service := datastore.NewService(resourceRepoFac, dsRepo) + service := datastore.NewService(resourceRepoFac, dsRepo, nil, nil) err := 
service.UpdateResource(context.TODO(), namespaceSpec, []models.ResourceSpec{resourceSpec1, resourceSpec2}, nil) assert.Nil(t, err) }) @@ -234,7 +234,7 @@ func TestService(t *testing.T) { projectResourceRepoFac := new(mock.ProjectResourceSpecRepoFactory) defer projectResourceRepoFac.AssertExpectations(t) - service := datastore.NewService(resourceRepoFac, dsRepo) + service := datastore.NewService(resourceRepoFac, dsRepo, nil, nil) err := service.UpdateResource(context.TODO(), namespaceSpec, []models.ResourceSpec{resourceSpec1, resourceSpec2}, nil) assert.NotNil(t, err) }) @@ -270,7 +270,7 @@ func TestService(t *testing.T) { projectResourceRepoFac := new(mock.ProjectResourceSpecRepoFactory) defer projectResourceRepoFac.AssertExpectations(t) - service := datastore.NewService(resourceRepoFac, dsRepo) + service := datastore.NewService(resourceRepoFac, dsRepo, nil, nil) resp, err := service.ReadResource(context.TODO(), namespaceSpec, "bq", resourceSpec1.Name) assert.Nil(t, err) assert.Equal(t, resourceSpec1, resp) @@ -301,7 +301,7 @@ func TestService(t *testing.T) { projectResourceRepoFac := new(mock.ProjectResourceSpecRepoFactory) defer projectResourceRepoFac.AssertExpectations(t) - service := datastore.NewService(resourceRepoFac, dsRepo) + service := datastore.NewService(resourceRepoFac, dsRepo, nil, nil) _, err := service.ReadResource(context.TODO(), namespaceSpec, "bq", resourceSpec1.Name) assert.NotNil(t, err) }) @@ -338,7 +338,7 @@ func TestService(t *testing.T) { projectResourceRepoFac := new(mock.ProjectResourceSpecRepoFactory) defer projectResourceRepoFac.AssertExpectations(t) - service := datastore.NewService(resourceRepoFac, dsRepo) + service := datastore.NewService(resourceRepoFac, dsRepo, nil, nil) err := service.DeleteResource(context.TODO(), namespaceSpec, "bq", resourceSpec1.Name) assert.Nil(t, err) }) @@ -372,7 +372,7 @@ func TestService(t *testing.T) { projectResourceRepoFac := new(mock.ProjectResourceSpecRepoFactory) defer 
projectResourceRepoFac.AssertExpectations(t) - service := datastore.NewService(resourceRepoFac, dsRepo) + service := datastore.NewService(resourceRepoFac, dsRepo, nil, nil) err := service.DeleteResource(context.TODO(), namespaceSpec, "bq", resourceSpec1.Name) assert.NotNil(t, err) }) @@ -399,13 +399,30 @@ func TestService(t *testing.T) { Value: "select * from 1", }, }) + destination := &models.GenerateDestinationResponse{ + Destination: "project.dataset.table", + Type: models.DestinationTypeBigquery, + } + t.Run("should return list of resources without dependents to be backed up", func(t *testing.T) { + datastorer := new(mock.Datastorer) + defer datastorer.AssertExpectations(t) + + dsRepo := new(mock.SupportedDatastoreRepo) + defer dsRepo.AssertExpectations(t) + execUnit := new(mock.BasePlugin) defer execUnit.AssertExpectations(t) depMod := new(mock.DependencyResolverMod) defer depMod.AssertExpectations(t) + resourceRepo := new(mock.ResourceSpecRepository) + defer resourceRepo.AssertExpectations(t) + + resourceRepoFac := new(mock.ResourceSpecRepoFactory) + defer resourceRepoFac.AssertExpectations(t) + jobTask.Unit = &models.Plugin{Base: execUnit, DependencyMod: depMod} jobSpec := models.JobSpec{ ID: uuid.Must(uuid.NewRandom()), @@ -413,47 +430,36 @@ func TestService(t *testing.T) { Task: jobTask, Assets: jobAssets, } - unitData := models.GenerateDestinationRequest{ Config: models.PluginConfigs{}.FromJobSpec(jobSpec.Task.Config), Assets: models.PluginAssets{}.FromJobSpec(jobSpec.Assets), } - destination := &models.GenerateDestinationResponse{ - Destination: "project.dataset.table", - Type: models.DestinationTypeBigquery, - } - depMod.On("GenerateDestination", context.TODO(), unitData).Return(destination, nil) - - datastorer := new(mock.Datastorer) - defer datastorer.AssertExpectations(t) - - dsRepo := new(mock.SupportedDatastoreRepo) - defer dsRepo.AssertExpectations(t) - dsRepo.On("GetByName", models.DestinationTypeBigquery.String()).Return(datastorer, nil) - 
resourceSpec := models.ResourceSpec{ Version: 1, Name: "project.dataset.table", Type: models.ResourceTypeTable, Datastore: datastorer, } - resourceRepo := new(mock.ResourceSpecRepository) - resourceRepo.On("GetByURN", destination.URN()).Return(resourceSpec, nil) - defer resourceRepo.AssertExpectations(t) - - resourceRepoFac := new(mock.ResourceSpecRepoFactory) - resourceRepoFac.On("New", namespaceSpec, datastorer).Return(resourceRepo) - defer resourceRepoFac.AssertExpectations(t) - + backupReq := models.BackupRequest{ + ResourceName: resourceSpec.Name, + Project: projectSpec, + Namespace: namespaceSpec, + IgnoreDownstream: false, + DryRun: true, + } backupResourceReq := models.BackupResourceRequest{ - Resource: resourceSpec, - Project: projectSpec, - DryRun: true, + Resource: resourceSpec, + BackupSpec: backupReq, } - datastorer.On("BackupResource", context.TODO(), backupResourceReq).Return(nil) - service := datastore.NewService(resourceRepoFac, dsRepo) - resp, err := service.BackupResourceDryRun(context.TODO(), projectSpec, namespaceSpec, []models.JobSpec{jobSpec}) + depMod.On("GenerateDestination", context.TODO(), unitData).Return(destination, nil) + dsRepo.On("GetByName", models.DestinationTypeBigquery.String()).Return(datastorer, nil) + resourceRepoFac.On("New", namespaceSpec, datastorer).Return(resourceRepo) + resourceRepo.On("GetByURN", destination.URN()).Return(resourceSpec, nil) + datastorer.On("BackupResource", context.TODO(), backupResourceReq).Return(models.BackupResourceResponse{}, nil) + + service := datastore.NewService(resourceRepoFac, dsRepo, nil, nil) + resp, err := service.BackupResourceDryRun(context.TODO(), backupReq, []models.JobSpec{jobSpec}) assert.Nil(t, err) assert.Equal(t, []string{destination.Destination}, resp) }) @@ -469,7 +475,12 @@ func TestService(t *testing.T) { dsRepo := new(mock.SupportedDatastoreRepo) defer dsRepo.AssertExpectations(t) - dsRepo.On("GetByName", models.DestinationTypeBigquery.String()).Return(datastorer, nil) + + 
resourceRepo := new(mock.ResourceSpecRepository) + defer resourceRepo.AssertExpectations(t) + + resourceRepoFac := new(mock.ResourceSpecRepoFactory) + defer resourceRepoFac.AssertExpectations(t) jobTask.Unit = &models.Plugin{Base: execUnit, DependencyMod: depMod} jobDownstream := models.JobSpec{ @@ -482,6 +493,7 @@ func TestService(t *testing.T) { dependencies[jobDownstream.GetName()] = models.JobSpecDependency{ Job: &jobDownstream, } + jobRoot := models.JobSpec{ ID: uuid.Must(uuid.NewRandom()), Name: "job-1", @@ -489,64 +501,66 @@ func TestService(t *testing.T) { Assets: jobAssets, Dependencies: dependencies, } - unitRoot := models.GenerateDestinationRequest{ Config: models.PluginConfigs{}.FromJobSpec(jobRoot.Task.Config), Assets: models.PluginAssets{}.FromJobSpec(jobRoot.Assets), } destinationRoot := &models.GenerateDestinationResponse{ - Destination: "project.dataset.table", + Destination: "project:dataset.root", Type: models.DestinationTypeBigquery, } - depMod.On("GenerateDestination", context.TODO(), unitRoot).Return(destinationRoot, nil).Once() + resourceRoot := models.ResourceSpec{ + Version: 1, + Name: "project.dataset.root", + Type: models.ResourceTypeTable, + Datastore: datastorer, + } + backupReq := models.BackupRequest{ + ResourceName: resourceRoot.Name, + Project: projectSpec, + Namespace: namespaceSpec, + IgnoreDownstream: false, + DryRun: true, + } + backupResourceReqRoot := models.BackupResourceRequest{ + Resource: resourceRoot, + BackupSpec: backupReq, + } unitDownstream := models.GenerateDestinationRequest{ Config: models.PluginConfigs{}.FromJobSpec(jobDownstream.Task.Config), Assets: models.PluginAssets{}.FromJobSpec(jobDownstream.Assets), } destinationDownstream := &models.GenerateDestinationResponse{ - Destination: "project.dataset.downstream", + Destination: "project:dataset.downstream", Type: models.DestinationTypeBigquery, } - depMod.On("GenerateDestination", context.TODO(), unitDownstream).Return(destinationDownstream, nil).Once() - - 
resourceRoot := models.ResourceSpec{ - Version: 1, - Name: "project.dataset.root", - Type: models.ResourceTypeTable, - Datastore: datastorer, - } resourceDownstream := models.ResourceSpec{ Version: 1, Name: "project.dataset.downstream", Type: models.ResourceTypeTable, Datastore: datastorer, } - resourceRepo := new(mock.ResourceSpecRepository) + backupResourceReqDownstream := models.BackupResourceRequest{ + Resource: resourceDownstream, + BackupSpec: backupReq, + } + + dsRepo.On("GetByName", models.DestinationTypeBigquery.String()).Return(datastorer, nil) + + depMod.On("GenerateDestination", context.TODO(), unitRoot).Return(destinationRoot, nil).Once() resourceRepo.On("GetByURN", destinationRoot.URN()).Return(resourceRoot, nil).Once() + datastorer.On("BackupResource", context.TODO(), backupResourceReqRoot).Return(models.BackupResourceResponse{}, nil).Once() + + depMod.On("GenerateDestination", context.TODO(), unitDownstream).Return(destinationDownstream, nil).Once() resourceRepo.On("GetByURN", destinationDownstream.URN()).Return(resourceDownstream, nil).Once() - defer resourceRepo.AssertExpectations(t) + datastorer.On("BackupResource", context.TODO(), backupResourceReqDownstream).Return(models.BackupResourceResponse{}, nil).Once() - resourceRepoFac := new(mock.ResourceSpecRepoFactory) resourceRepoFac.On("New", namespaceSpec, datastorer).Return(resourceRepo) - defer resourceRepoFac.AssertExpectations(t) - - backupResourceReqRoot := models.BackupResourceRequest{ - Resource: resourceRoot, - Project: projectSpec, - DryRun: true, - } - datastorer.On("BackupResource", context.TODO(), backupResourceReqRoot).Return(nil).Once() - backupResourceReqDownstream := models.BackupResourceRequest{ - Resource: resourceDownstream, - Project: projectSpec, - DryRun: true, - } - datastorer.On("BackupResource", context.TODO(), backupResourceReqDownstream).Return(nil).Once() + service := datastore.NewService(resourceRepoFac, dsRepo, nil, nil) + resp, err := 
service.BackupResourceDryRun(context.TODO(), backupReq, []models.JobSpec{jobRoot, jobDownstream}) - service := datastore.NewService(resourceRepoFac, dsRepo) - resp, err := service.BackupResourceDryRun(context.TODO(), projectSpec, namespaceSpec, []models.JobSpec{jobRoot, jobDownstream}) assert.Nil(t, err) assert.Equal(t, []string{destinationRoot.Destination, destinationDownstream.Destination}, resp) }) @@ -557,6 +571,9 @@ func TestService(t *testing.T) { depMod := new(mock.DependencyResolverMod) defer depMod.AssertExpectations(t) + dsRepo := new(mock.SupportedDatastoreRepo) + defer dsRepo.AssertExpectations(t) + jobTask.Unit = &models.Plugin{Base: execUnit, DependencyMod: depMod} jobSpec := models.JobSpec{ ID: uuid.Must(uuid.NewRandom()), @@ -564,20 +581,23 @@ func TestService(t *testing.T) { Task: jobTask, Assets: jobAssets, } - unitData := models.GenerateDestinationRequest{ Config: models.PluginConfigs{}.FromJobSpec(jobSpec.Task.Config), Assets: models.PluginAssets{}.FromJobSpec(jobSpec.Assets), } + backupReq := models.BackupRequest{ + Project: projectSpec, + Namespace: namespaceSpec, + IgnoreDownstream: false, + DryRun: true, + } errorMsg := "unable to generate destination" depMod.On("GenerateDestination", context.TODO(), unitData).Return(&models.GenerateDestinationResponse{}, errors.New(errorMsg)) - dsRepo := new(mock.SupportedDatastoreRepo) - defer dsRepo.AssertExpectations(t) + service := datastore.NewService(nil, dsRepo, nil, nil) + resp, err := service.BackupResourceDryRun(context.TODO(), backupReq, []models.JobSpec{jobSpec}) - service := datastore.NewService(nil, dsRepo) - resp, err := service.BackupResourceDryRun(context.TODO(), projectSpec, namespaceSpec, []models.JobSpec{jobSpec}) assert.Contains(t, err.Error(), errorMsg) assert.Nil(t, resp) }) @@ -588,6 +608,12 @@ func TestService(t *testing.T) { depMod := new(mock.DependencyResolverMod) defer depMod.AssertExpectations(t) + datastorer := new(mock.Datastorer) + defer datastorer.AssertExpectations(t) + + 
dsRepo := new(mock.SupportedDatastoreRepo) + defer dsRepo.AssertExpectations(t) + jobTask.Unit = &models.Plugin{Base: execUnit, DependencyMod: depMod} jobSpec := models.JobSpec{ ID: uuid.Must(uuid.NewRandom()), @@ -595,28 +621,25 @@ func TestService(t *testing.T) { Task: jobTask, Assets: jobAssets, } - unitData := models.GenerateDestinationRequest{ Config: models.PluginConfigs{}.FromJobSpec(jobSpec.Task.Config), Assets: models.PluginAssets{}.FromJobSpec(jobSpec.Assets), } - destination := &models.GenerateDestinationResponse{ - Destination: "project.dataset.table", - Type: models.DestinationTypeBigquery, + backupReq := models.BackupRequest{ + Project: projectSpec, + Namespace: namespaceSpec, + IgnoreDownstream: false, + DryRun: true, } depMod.On("GenerateDestination", context.TODO(), unitData).Return(destination, nil) - datastorer := new(mock.Datastorer) - defer datastorer.AssertExpectations(t) - - dsRepo := new(mock.SupportedDatastoreRepo) - defer dsRepo.AssertExpectations(t) errorMsg := "unable to get datastorer" dsRepo.On("GetByName", destination.Type.String()).Return(datastorer, errors.New(errorMsg)) - service := datastore.NewService(nil, dsRepo) - resp, err := service.BackupResourceDryRun(context.TODO(), projectSpec, namespaceSpec, []models.JobSpec{jobSpec}) + service := datastore.NewService(nil, dsRepo, nil, nil) + resp, err := service.BackupResourceDryRun(context.TODO(), backupReq, []models.JobSpec{jobSpec}) + assert.Contains(t, err.Error(), errorMsg) assert.Nil(t, resp) }) @@ -627,6 +650,18 @@ func TestService(t *testing.T) { depMod := new(mock.DependencyResolverMod) defer depMod.AssertExpectations(t) + datastorer := new(mock.Datastorer) + defer datastorer.AssertExpectations(t) + + dsRepo := new(mock.SupportedDatastoreRepo) + defer dsRepo.AssertExpectations(t) + + resourceRepo := new(mock.ResourceSpecRepository) + defer resourceRepo.AssertExpectations(t) + + resourceRepoFac := new(mock.ResourceSpecRepoFactory) + defer resourceRepoFac.AssertExpectations(t) + 
jobTask.Unit = &models.Plugin{Base: execUnit, DependencyMod: depMod} jobSpec := models.JobSpec{ ID: uuid.Must(uuid.NewRandom()), @@ -634,48 +669,39 @@ func TestService(t *testing.T) { Task: jobTask, Assets: jobAssets, } - unitData := models.GenerateDestinationRequest{ Config: models.PluginConfigs{}.FromJobSpec(jobSpec.Task.Config), Assets: models.PluginAssets{}.FromJobSpec(jobSpec.Assets), } - destination := &models.GenerateDestinationResponse{ - Destination: "project.dataset.table", - Type: models.DestinationTypeBigquery, - } - depMod.On("GenerateDestination", context.TODO(), unitData).Return(destination, nil) - - datastorer := new(mock.Datastorer) - defer datastorer.AssertExpectations(t) - - dsRepo := new(mock.SupportedDatastoreRepo) - defer dsRepo.AssertExpectations(t) - dsRepo.On("GetByName", models.DestinationTypeBigquery.String()).Return(datastorer, nil) - resourceSpec := models.ResourceSpec{ Version: 1, Name: "project.dataset.table", Type: models.ResourceTypeTable, Datastore: datastorer, } - resourceRepo := new(mock.ResourceSpecRepository) - resourceRepo.On("GetByURN", destination.URN()).Return(resourceSpec, nil) - defer resourceRepo.AssertExpectations(t) + backupReq := models.BackupRequest{ + ResourceName: resourceSpec.Name, + Project: projectSpec, + Namespace: namespaceSpec, + IgnoreDownstream: false, + DryRun: true, + } + backupResourceReq := models.BackupResourceRequest{ + Resource: resourceSpec, + BackupSpec: backupReq, + } - resourceRepoFac := new(mock.ResourceSpecRepoFactory) + depMod.On("GenerateDestination", context.TODO(), unitData).Return(destination, nil) + dsRepo.On("GetByName", models.DestinationTypeBigquery.String()).Return(datastorer, nil) + resourceRepo.On("GetByURN", destination.URN()).Return(resourceSpec, nil) resourceRepoFac.On("New", namespaceSpec, datastorer).Return(resourceRepo) - defer resourceRepoFac.AssertExpectations(t) - backupResourceReq := models.BackupResourceRequest{ - Resource: resourceSpec, - Project: projectSpec, - DryRun: 
true, - } errorMsg := "unable to do backup dry run" - datastorer.On("BackupResource", context.TODO(), backupResourceReq).Return(errors.New(errorMsg)) + datastorer.On("BackupResource", context.TODO(), backupResourceReq).Return(models.BackupResourceResponse{}, errors.New(errorMsg)) + + service := datastore.NewService(resourceRepoFac, dsRepo, nil, nil) + resp, err := service.BackupResourceDryRun(context.TODO(), backupReq, []models.JobSpec{jobSpec}) - service := datastore.NewService(resourceRepoFac, dsRepo) - resp, err := service.BackupResourceDryRun(context.TODO(), projectSpec, namespaceSpec, []models.JobSpec{jobSpec}) assert.Equal(t, errorMsg, err.Error()) assert.Nil(t, resp) }) @@ -686,6 +712,18 @@ func TestService(t *testing.T) { depMod := new(mock.DependencyResolverMod) defer depMod.AssertExpectations(t) + datastorer := new(mock.Datastorer) + defer datastorer.AssertExpectations(t) + + dsRepo := new(mock.SupportedDatastoreRepo) + defer dsRepo.AssertExpectations(t) + + resourceRepo := new(mock.ResourceSpecRepository) + defer resourceRepo.AssertExpectations(t) + + resourceRepoFac := new(mock.ResourceSpecRepoFactory) + defer resourceRepoFac.AssertExpectations(t) + jobTask.Unit = &models.Plugin{Base: execUnit, DependencyMod: depMod} jobSpec := models.JobSpec{ ID: uuid.Must(uuid.NewRandom()), @@ -698,30 +736,22 @@ func TestService(t *testing.T) { Config: models.PluginConfigs{}.FromJobSpec(jobSpec.Task.Config), Assets: models.PluginAssets{}.FromJobSpec(jobSpec.Assets), } - destination := &models.GenerateDestinationResponse{ - Destination: "project.dataset.table", - Type: models.DestinationTypeBigquery, + backupReq := models.BackupRequest{ + Project: projectSpec, + Namespace: namespaceSpec, + IgnoreDownstream: false, } - depMod.On("GenerateDestination", context.TODO(), unitData).Return(destination, nil) - - datastorer := new(mock.Datastorer) - defer datastorer.AssertExpectations(t) - dsRepo := new(mock.SupportedDatastoreRepo) - defer dsRepo.AssertExpectations(t) + 
depMod.On("GenerateDestination", context.TODO(), unitData).Return(destination, nil) dsRepo.On("GetByName", models.DestinationTypeBigquery.String()).Return(datastorer, nil) + resourceRepoFac.On("New", namespaceSpec, datastorer).Return(resourceRepo) - resourceRepo := new(mock.ResourceSpecRepository) - defer resourceRepo.AssertExpectations(t) errorMsg := "unable to get resource" resourceRepo.On("GetByURN", destination.URN()).Return(models.ResourceSpec{}, errors.New(errorMsg)) - resourceRepoFac := new(mock.ResourceSpecRepoFactory) - defer resourceRepoFac.AssertExpectations(t) - resourceRepoFac.On("New", namespaceSpec, datastorer).Return(resourceRepo) + service := datastore.NewService(resourceRepoFac, dsRepo, nil, nil) + resp, err := service.BackupResourceDryRun(context.TODO(), backupReq, []models.JobSpec{jobSpec}) - service := datastore.NewService(resourceRepoFac, dsRepo) - resp, err := service.BackupResourceDryRun(context.TODO(), projectSpec, namespaceSpec, []models.JobSpec{jobSpec}) assert.Equal(t, errorMsg, err.Error()) assert.Nil(t, resp) }) @@ -737,7 +767,12 @@ func TestService(t *testing.T) { dsRepo := new(mock.SupportedDatastoreRepo) defer dsRepo.AssertExpectations(t) - dsRepo.On("GetByName", models.DestinationTypeBigquery.String()).Return(datastorer, nil) + + resourceRepo := new(mock.ResourceSpecRepository) + defer resourceRepo.AssertExpectations(t) + + resourceRepoFac := new(mock.ResourceSpecRepoFactory) + defer resourceRepoFac.AssertExpectations(t) jobTask.Unit = &models.Plugin{Base: execUnit, DependencyMod: depMod} jobDownstream := models.JobSpec{ @@ -757,7 +792,6 @@ func TestService(t *testing.T) { Assets: jobAssets, Dependencies: dependencies, } - unitRoot := models.GenerateDestinationRequest{ Config: models.PluginConfigs{}.FromJobSpec(jobRoot.Task.Config), Assets: models.PluginAssets{}.FromJobSpec(jobRoot.Assets), @@ -766,38 +800,39 @@ func TestService(t *testing.T) { Destination: "project.dataset.table", Type: models.DestinationTypeBigquery, } - 
depMod.On("GenerateDestination", context.TODO(), unitRoot).Return(destinationRoot, nil).Once() - - unitDownstream := models.GenerateDestinationRequest{ - Config: models.PluginConfigs{}.FromJobSpec(jobDownstream.Task.Config), - Assets: models.PluginAssets{}.FromJobSpec(jobDownstream.Assets), - } - errorMsg := "unable to generate destination" - depMod.On("GenerateDestination", context.TODO(), unitDownstream).Return(&models.GenerateDestinationResponse{}, errors.New(errorMsg)).Once() - resourceRoot := models.ResourceSpec{ Version: 1, Name: "project.dataset.root", Type: models.ResourceTypeTable, Datastore: datastorer, } - resourceRepo := new(mock.ResourceSpecRepository) - resourceRepo.On("GetByURN", destinationRoot.URN()).Return(resourceRoot, nil).Once() - defer resourceRepo.AssertExpectations(t) + backupReq := models.BackupRequest{ + ResourceName: resourceRoot.Name, + Project: projectSpec, + Namespace: namespaceSpec, + IgnoreDownstream: false, + DryRun: true, + } + backupResourceReqRoot := models.BackupResourceRequest{ + Resource: resourceRoot, + BackupSpec: backupReq, + } + unitDownstream := models.GenerateDestinationRequest{ + Config: models.PluginConfigs{}.FromJobSpec(jobDownstream.Task.Config), + Assets: models.PluginAssets{}.FromJobSpec(jobDownstream.Assets), + } - resourceRepoFac := new(mock.ResourceSpecRepoFactory) + depMod.On("GenerateDestination", context.TODO(), unitRoot).Return(destinationRoot, nil).Once() + dsRepo.On("GetByName", models.DestinationTypeBigquery.String()).Return(datastorer, nil) resourceRepoFac.On("New", namespaceSpec, datastorer).Return(resourceRepo) - defer resourceRepoFac.AssertExpectations(t) + resourceRepo.On("GetByURN", destinationRoot.URN()).Return(resourceRoot, nil).Once() + datastorer.On("BackupResource", context.TODO(), backupResourceReqRoot).Return(models.BackupResourceResponse{}, nil).Once() - backupResourceReqRoot := models.BackupResourceRequest{ - Resource: resourceRoot, - Project: projectSpec, - DryRun: true, - } - 
datastorer.On("BackupResource", context.TODO(), backupResourceReqRoot).Return(nil).Once() + errorMsg := "unable to generate destination" + depMod.On("GenerateDestination", context.TODO(), unitDownstream).Return(&models.GenerateDestinationResponse{}, errors.New(errorMsg)).Once() - service := datastore.NewService(resourceRepoFac, dsRepo) - resp, err := service.BackupResourceDryRun(context.TODO(), projectSpec, namespaceSpec, []models.JobSpec{jobRoot, jobDownstream}) + service := datastore.NewService(resourceRepoFac, dsRepo, nil, nil) + resp, err := service.BackupResourceDryRun(context.TODO(), backupReq, []models.JobSpec{jobRoot, jobDownstream}) assert.Equal(t, errorMsg, err.Error()) assert.Nil(t, resp) @@ -814,6 +849,12 @@ func TestService(t *testing.T) { dsRepo := new(mock.SupportedDatastoreRepo) defer dsRepo.AssertExpectations(t) + + resourceRepo := new(mock.ResourceSpecRepository) + defer resourceRepo.AssertExpectations(t) + + resourceRepoFac := new(mock.ResourceSpecRepoFactory) + defer resourceRepoFac.AssertExpectations(t) dsRepo.On("GetByName", models.DestinationTypeBigquery.String()).Return(datastorer, nil) jobTask.Unit = &models.Plugin{Base: execUnit, DependencyMod: depMod} @@ -843,7 +884,23 @@ func TestService(t *testing.T) { Destination: "project.dataset.table", Type: models.DestinationTypeBigquery, } - depMod.On("GenerateDestination", context.TODO(), unitRoot).Return(destinationRoot, nil).Once() + resourceRoot := models.ResourceSpec{ + Version: 1, + Name: "project.dataset.root", + Type: models.ResourceTypeTable, + Datastore: datastorer, + } + backupReq := models.BackupRequest{ + ResourceName: resourceRoot.Name, + Project: projectSpec, + Namespace: namespaceSpec, + IgnoreDownstream: false, + DryRun: true, + } + backupResourceReqRoot := models.BackupResourceRequest{ + Resource: resourceRoot, + BackupSpec: backupReq, + } unitDownstream := models.GenerateDestinationRequest{ Config: models.PluginConfigs{}.FromJobSpec(jobDownstream.Task.Config), @@ -853,32 +910,18 
@@ func TestService(t *testing.T) { Destination: "project.dataset.downstream", Type: models.DestinationTypeBigquery, } - depMod.On("GenerateDestination", context.TODO(), unitDownstream).Return(destinationDownstream, nil).Once() - resourceRoot := models.ResourceSpec{ - Version: 1, - Name: "project.dataset.root", - Type: models.ResourceTypeTable, - Datastore: datastorer, - } - resourceRepo := new(mock.ResourceSpecRepository) - defer resourceRepo.AssertExpectations(t) + depMod.On("GenerateDestination", context.TODO(), unitRoot).Return(destinationRoot, nil).Once() + resourceRepoFac.On("New", namespaceSpec, datastorer).Return(resourceRepo) resourceRepo.On("GetByURN", destinationRoot.URN()).Return(resourceRoot, nil).Once() - resourceRepo.On("GetByURN", destinationDownstream.URN()).Return(models.ResourceSpec{}, store.ErrResourceNotFound).Once() + datastorer.On("BackupResource", context.TODO(), backupResourceReqRoot).Return(models.BackupResourceResponse{}, nil).Once() - resourceRepoFac := new(mock.ResourceSpecRepoFactory) - resourceRepoFac.On("New", namespaceSpec, datastorer).Return(resourceRepo) - defer resourceRepoFac.AssertExpectations(t) + depMod.On("GenerateDestination", context.TODO(), unitDownstream).Return(destinationDownstream, nil).Once() + resourceRepo.On("GetByURN", destinationDownstream.URN()).Return(models.ResourceSpec{}, store.ErrResourceNotFound).Once() - backupResourceReqRoot := models.BackupResourceRequest{ - Resource: resourceRoot, - Project: projectSpec, - DryRun: true, - } - datastorer.On("BackupResource", context.TODO(), backupResourceReqRoot).Return(nil).Once() + service := datastore.NewService(resourceRepoFac, dsRepo, nil, nil) + resp, err := service.BackupResourceDryRun(context.TODO(), backupReq, []models.JobSpec{jobRoot, jobDownstream}) - service := datastore.NewService(resourceRepoFac, dsRepo) - resp, err := service.BackupResourceDryRun(context.TODO(), projectSpec, namespaceSpec, []models.JobSpec{jobRoot, jobDownstream}) assert.Nil(t, err) 
assert.Equal(t, []string{destinationRoot.Destination}, resp) }) @@ -894,11 +937,16 @@ func TestService(t *testing.T) { dsRepo := new(mock.SupportedDatastoreRepo) defer dsRepo.AssertExpectations(t) - dsRepo.On("GetByName", models.DestinationTypeBigquery.String()).Return(datastorer, nil) - jobTask.Unit = &models.Plugin{Base: execUnit, DependencyMod: depMod} - jobDownstream := models.JobSpec{ - ID: uuid.Must(uuid.NewRandom()), + resourceRepo := new(mock.ResourceSpecRepository) + defer resourceRepo.AssertExpectations(t) + + resourceRepoFac := new(mock.ResourceSpecRepoFactory) + defer resourceRepoFac.AssertExpectations(t) + + jobTask.Unit = &models.Plugin{Base: execUnit, DependencyMod: depMod} + jobDownstream := models.JobSpec{ + ID: uuid.Must(uuid.NewRandom()), Name: "job-2", Task: jobTask, Assets: jobAssets, @@ -914,7 +962,6 @@ func TestService(t *testing.T) { Assets: jobAssets, Dependencies: dependencies, } - unitRoot := models.GenerateDestinationRequest{ Config: models.PluginConfigs{}.FromJobSpec(jobRoot.Task.Config), Assets: models.PluginAssets{}.FromJobSpec(jobRoot.Assets), @@ -923,7 +970,23 @@ func TestService(t *testing.T) { Destination: "project.dataset.table", Type: models.DestinationTypeBigquery, } - depMod.On("GenerateDestination", context.TODO(), unitRoot).Return(destinationRoot, nil).Once() + resourceRoot := models.ResourceSpec{ + Version: 1, + Name: "project.dataset.root", + Type: models.ResourceTypeTable, + Datastore: datastorer, + } + backupReq := models.BackupRequest{ + ResourceName: resourceRoot.Name, + Project: projectSpec, + Namespace: namespaceSpec, + IgnoreDownstream: false, + DryRun: true, + } + backupResourceReqRoot := models.BackupResourceRequest{ + Resource: resourceRoot, + BackupSpec: backupReq, + } unitDownstream := models.GenerateDestinationRequest{ Config: models.PluginConfigs{}.FromJobSpec(jobDownstream.Task.Config), @@ -933,47 +996,967 @@ func TestService(t *testing.T) { Destination: "project.dataset.downstream", Type: 
models.DestinationTypeBigquery, } + resourceDownstream := models.ResourceSpec{ + Version: 1, + Name: "project.dataset.downstream", + Type: models.ResourceTypeTable, + Datastore: datastorer, + } + backupResourceReqDownstream := models.BackupResourceRequest{ + Resource: resourceDownstream, + BackupSpec: backupReq, + } + + depMod.On("GenerateDestination", context.TODO(), unitRoot).Return(destinationRoot, nil).Once() + resourceRepoFac.On("New", namespaceSpec, datastorer).Return(resourceRepo) + resourceRepo.On("GetByURN", destinationRoot.URN()).Return(resourceRoot, nil).Once() + dsRepo.On("GetByName", models.DestinationTypeBigquery.String()).Return(datastorer, nil) + datastorer.On("BackupResource", context.TODO(), backupResourceReqRoot).Return(models.BackupResourceResponse{}, nil).Once() + depMod.On("GenerateDestination", context.TODO(), unitDownstream).Return(destinationDownstream, nil).Once() + resourceRepo.On("GetByURN", destinationDownstream.URN()).Return(resourceDownstream, nil).Once() + datastorer.On("BackupResource", context.TODO(), backupResourceReqDownstream).Return(models.BackupResourceResponse{}, models.ErrUnsupportedResource).Once() + + service := datastore.NewService(resourceRepoFac, dsRepo, nil, nil) + resp, err := service.BackupResourceDryRun(context.TODO(), backupReq, []models.JobSpec{jobRoot, jobDownstream}) + + assert.Nil(t, err) + assert.Equal(t, []string{destinationRoot.Destination}, resp) + }) + }) + t.Run("BackupResource", func(t *testing.T) { + jobTask := models.JobSpecTask{ + Config: models.JobSpecConfigs{ + { + Name: "do", + Value: "this", + }, + }, + Priority: 2000, + Window: models.JobSpecTaskWindow{ + Size: time.Hour, + Offset: 0, + TruncateTo: "d", + }, + } + jobAssets := *models.JobAssets{}.New( + []models.JobSpecAsset{ + { + Name: "query.sql", + Value: "select * from 1", + }, + }) + destination := &models.GenerateDestinationResponse{ + Destination: "project.dataset.table", + Type: models.DestinationTypeBigquery, + } + backupUUID := 
uuid.Must(uuid.NewRandom()) + + t.Run("should able to do backup without downstreams", func(t *testing.T) { + datastorer := new(mock.Datastorer) + defer datastorer.AssertExpectations(t) + + dsRepo := new(mock.SupportedDatastoreRepo) + defer dsRepo.AssertExpectations(t) + + execUnit := new(mock.BasePlugin) + defer execUnit.AssertExpectations(t) + + depMod := new(mock.DependencyResolverMod) + defer depMod.AssertExpectations(t) + + resourceRepo := new(mock.ResourceSpecRepository) + defer resourceRepo.AssertExpectations(t) + + resourceRepoFac := new(mock.ResourceSpecRepoFactory) + defer resourceRepoFac.AssertExpectations(t) + + uuidProvider := new(mock.UUIDProvider) + defer uuidProvider.AssertExpectations(t) + + backupRepo := new(mock.BackupRepo) + defer backupRepo.AssertExpectations(t) + + backupRepoFac := new(mock.BackupRepoFactory) + defer backupRepoFac.AssertExpectations(t) + + jobTask.Unit = &models.Plugin{Base: execUnit, DependencyMod: depMod} + jobSpec := models.JobSpec{ + ID: uuid.Must(uuid.NewRandom()), + Name: "job-1", + Task: jobTask, + Assets: jobAssets, + } + unitData := models.GenerateDestinationRequest{ + Config: models.PluginConfigs{}.FromJobSpec(jobSpec.Task.Config), + Assets: models.PluginAssets{}.FromJobSpec(jobSpec.Assets), + } + resourceSpec := models.ResourceSpec{ + Version: 1, + Name: "project.dataset.table", + Type: models.ResourceTypeTable, + Datastore: datastorer, + } + backupReq := models.BackupRequest{ + ID: backupUUID, + ResourceName: resourceSpec.Name, + Project: projectSpec, + Namespace: namespaceSpec, + IgnoreDownstream: false, + DryRun: true, + } + backupResourceReq := models.BackupResourceRequest{ + Resource: resourceSpec, + BackupSpec: backupReq, + } + resultURN := "store://backupURN" + resultSpec := map[string]interface{}{"project": projectSpec.Name, "location": "optimus_backup"} + backupResult := models.BackupResult{ + URN: resultURN, + Spec: resultSpec, + } + backupSpec := models.BackupSpec{ + ID: backupUUID, + Resource: 
resourceSpec, + Result: map[string]interface{}{resourceSpec.Name: backupResult}, + Description: "", + } + + depMod.On("GenerateDestination", context.TODO(), unitData).Return(destination, nil) + dsRepo.On("GetByName", models.DestinationTypeBigquery.String()).Return(datastorer, nil) + resourceRepoFac.On("New", namespaceSpec, datastorer).Return(resourceRepo) + resourceRepo.On("GetByURN", destination.URN()).Return(resourceSpec, nil) + datastorer.On("BackupResource", context.TODO(), backupResourceReq). + Return(models.BackupResourceResponse{ResultURN: resultURN, ResultSpec: resultSpec}, nil) + uuidProvider.On("NewUUID").Return(backupUUID, nil) + backupRepoFac.On("New", projectSpec, datastorer).Return(backupRepo) + backupRepo.On("Save", backupSpec).Return(nil) + + service := datastore.NewService(resourceRepoFac, dsRepo, uuidProvider, backupRepoFac) + resp, err := service.BackupResource(context.TODO(), backupReq, []models.JobSpec{jobSpec}) + assert.Nil(t, err) + assert.Equal(t, []string{resultURN}, resp) + }) + t.Run("should able to do backup with downstreams", func(t *testing.T) { + execUnit := new(mock.BasePlugin) + defer execUnit.AssertExpectations(t) + + depMod := new(mock.DependencyResolverMod) + defer depMod.AssertExpectations(t) + + datastorer := new(mock.Datastorer) + defer datastorer.AssertExpectations(t) + + dsRepo := new(mock.SupportedDatastoreRepo) + defer dsRepo.AssertExpectations(t) + + resourceRepo := new(mock.ResourceSpecRepository) + defer resourceRepo.AssertExpectations(t) + resourceRepoFac := new(mock.ResourceSpecRepoFactory) + defer resourceRepoFac.AssertExpectations(t) + + uuidProvider := new(mock.UUIDProvider) + defer uuidProvider.AssertExpectations(t) + + backupRepo := new(mock.BackupRepo) + defer backupRepo.AssertExpectations(t) + + backupRepoFac := new(mock.BackupRepoFactory) + defer backupRepoFac.AssertExpectations(t) + + jobTask.Unit = &models.Plugin{Base: execUnit, DependencyMod: depMod} + jobDownstream := models.JobSpec{ + ID: 
uuid.Must(uuid.NewRandom()), + Name: "job-2", + Task: jobTask, + Assets: jobAssets, + } + dependencies := make(map[string]models.JobSpecDependency) + dependencies[jobDownstream.GetName()] = models.JobSpecDependency{ + Job: &jobDownstream, + } + + //root + jobRoot := models.JobSpec{ + ID: uuid.Must(uuid.NewRandom()), + Name: "job-1", + Task: jobTask, + Assets: jobAssets, + Dependencies: dependencies, + } + unitRoot := models.GenerateDestinationRequest{ + Config: models.PluginConfigs{}.FromJobSpec(jobRoot.Task.Config), + Assets: models.PluginAssets{}.FromJobSpec(jobRoot.Assets), + } + destinationRoot := &models.GenerateDestinationResponse{ + Destination: "project:dataset.root", + Type: models.DestinationTypeBigquery, + } resourceRoot := models.ResourceSpec{ Version: 1, Name: "project.dataset.root", Type: models.ResourceTypeTable, Datastore: datastorer, } + backupReq := models.BackupRequest{ + ID: backupUUID, + ResourceName: resourceRoot.Name, + Project: projectSpec, + Namespace: namespaceSpec, + IgnoreDownstream: false, + DryRun: true, + } + backupResourceReqRoot := models.BackupResourceRequest{ + Resource: resourceRoot, + BackupSpec: backupReq, + } + resultURNRoot := "store://optimus_backup:backupURNRoot" + resultSpecRoot := map[string]interface{}{ + "project": projectSpec.Name, "location": "optimus_backup", "name": "backup_resource_root", + } + backupResultRoot := models.BackupResult{ + URN: resultURNRoot, + Spec: resultSpecRoot, + } + + //downstream + unitDownstream := models.GenerateDestinationRequest{ + Config: models.PluginConfigs{}.FromJobSpec(jobDownstream.Task.Config), + Assets: models.PluginAssets{}.FromJobSpec(jobDownstream.Assets), + } + destinationDownstream := &models.GenerateDestinationResponse{ + Destination: "project:dataset.downstream", + Type: models.DestinationTypeBigquery, + } resourceDownstream := models.ResourceSpec{ Version: 1, Name: "project.dataset.downstream", Type: models.ResourceTypeTable, Datastore: datastorer, } - resourceRepo := 
new(mock.ResourceSpecRepository) + backupResourceReqDownstream := models.BackupResourceRequest{ + Resource: resourceDownstream, + BackupSpec: backupReq, + } + resultURNDownstream := "store://optimus_backup:backupURNDownstream" + resultSpecDownstream := map[string]interface{}{ + "project": projectSpec.Name, "location": "optimus_backup", "name": "backup_resource_downstream", + } + backupResultDownstream := models.BackupResult{ + URN: resultURNDownstream, + Spec: resultSpecDownstream, + } + + backupResult := map[string]interface{}{ + destinationRoot.Destination: backupResultRoot, + destinationDownstream.Destination: backupResultDownstream, + } + backupSpec := models.BackupSpec{ + ID: backupUUID, + Resource: resourceRoot, + Result: backupResult, + Description: "", + } + + dsRepo.On("GetByName", models.DestinationTypeBigquery.String()).Return(datastorer, nil) + + depMod.On("GenerateDestination", context.TODO(), unitRoot).Return(destinationRoot, nil).Once() resourceRepo.On("GetByURN", destinationRoot.URN()).Return(resourceRoot, nil).Once() + datastorer.On("BackupResource", context.TODO(), backupResourceReqRoot). + Return(models.BackupResourceResponse{ResultURN: resultURNRoot, ResultSpec: resultSpecRoot}, nil).Once() + + depMod.On("GenerateDestination", context.TODO(), unitDownstream).Return(destinationDownstream, nil).Once() resourceRepo.On("GetByURN", destinationDownstream.URN()).Return(resourceDownstream, nil).Once() - defer resourceRepo.AssertExpectations(t) + datastorer.On("BackupResource", context.TODO(), backupResourceReqDownstream). 
+ Return(models.BackupResourceResponse{ResultURN: resultURNDownstream, ResultSpec: resultSpecDownstream}, nil).Once() - resourceRepoFac := new(mock.ResourceSpecRepoFactory) resourceRepoFac.On("New", namespaceSpec, datastorer).Return(resourceRepo) - defer resourceRepoFac.AssertExpectations(t) - backupResourceReqRoot := models.BackupResourceRequest{ - Resource: resourceRoot, - Project: projectSpec, - DryRun: true, + uuidProvider.On("NewUUID").Return(backupUUID, nil) + backupRepoFac.On("New", projectSpec, datastorer).Return(backupRepo) + backupRepo.On("Save", backupSpec).Return(nil) + + service := datastore.NewService(resourceRepoFac, dsRepo, uuidProvider, backupRepoFac) + resp, err := service.BackupResource(context.TODO(), backupReq, []models.JobSpec{jobRoot, jobDownstream}) + + assert.Nil(t, err) + assert.Equal(t, []string{resultURNRoot, resultURNDownstream}, resp) + }) + t.Run("should return error when unable to generate destination", func(t *testing.T) { + execUnit := new(mock.BasePlugin) + defer execUnit.AssertExpectations(t) + + depMod := new(mock.DependencyResolverMod) + defer depMod.AssertExpectations(t) + + dsRepo := new(mock.SupportedDatastoreRepo) + defer dsRepo.AssertExpectations(t) + + uuidProvider := new(mock.UUIDProvider) + defer uuidProvider.AssertExpectations(t) + + jobTask.Unit = &models.Plugin{Base: execUnit, DependencyMod: depMod} + jobSpec := models.JobSpec{ + ID: uuid.Must(uuid.NewRandom()), + Name: "job-1", + Task: jobTask, + Assets: jobAssets, + } + unitData := models.GenerateDestinationRequest{ + Config: models.PluginConfigs{}.FromJobSpec(jobSpec.Task.Config), + Assets: models.PluginAssets{}.FromJobSpec(jobSpec.Assets), + } + backupReq := models.BackupRequest{ + Project: projectSpec, + Namespace: namespaceSpec, + IgnoreDownstream: false, + DryRun: true, } - datastorer.On("BackupResource", context.TODO(), backupResourceReqRoot).Return(nil).Once() - backupResourceReqDownstream := models.BackupResourceRequest{ - Resource: resourceDownstream, - 
Project: projectSpec, - DryRun: true, + uuidProvider.On("NewUUID").Return(backupUUID, nil) + + errorMsg := "unable to generate destination" + depMod.On("GenerateDestination", context.TODO(), unitData).Return(&models.GenerateDestinationResponse{}, errors.New(errorMsg)) + + service := datastore.NewService(nil, dsRepo, uuidProvider, nil) + resp, err := service.BackupResource(context.TODO(), backupReq, []models.JobSpec{jobSpec}) + + assert.Contains(t, err.Error(), errorMsg) + assert.Nil(t, resp) + }) + t.Run("should return error when unable to get datastorer", func(t *testing.T) { + execUnit := new(mock.BasePlugin) + defer execUnit.AssertExpectations(t) + + depMod := new(mock.DependencyResolverMod) + defer depMod.AssertExpectations(t) + + datastorer := new(mock.Datastorer) + defer datastorer.AssertExpectations(t) + + dsRepo := new(mock.SupportedDatastoreRepo) + defer dsRepo.AssertExpectations(t) + + uuidProvider := new(mock.UUIDProvider) + defer uuidProvider.AssertExpectations(t) + + jobTask.Unit = &models.Plugin{Base: execUnit, DependencyMod: depMod} + jobSpec := models.JobSpec{ + ID: uuid.Must(uuid.NewRandom()), + Name: "job-1", + Task: jobTask, + Assets: jobAssets, + } + unitData := models.GenerateDestinationRequest{ + Config: models.PluginConfigs{}.FromJobSpec(jobSpec.Task.Config), + Assets: models.PluginAssets{}.FromJobSpec(jobSpec.Assets), + } + backupReq := models.BackupRequest{ + Project: projectSpec, + Namespace: namespaceSpec, + IgnoreDownstream: false, + DryRun: true, } - datastorer.On("BackupResource", context.TODO(), backupResourceReqDownstream).Return(models.ErrUnsupportedResource).Once() - service := datastore.NewService(resourceRepoFac, dsRepo) - resp, err := service.BackupResourceDryRun(context.TODO(), projectSpec, namespaceSpec, []models.JobSpec{jobRoot, jobDownstream}) - assert.Nil(t, err) - assert.Equal(t, []string{destinationRoot.Destination}, resp) + uuidProvider.On("NewUUID").Return(backupUUID, nil) + depMod.On("GenerateDestination", 
context.TODO(), unitData).Return(destination, nil) + + errorMsg := "unable to get datastorer" + dsRepo.On("GetByName", destination.Type.String()).Return(datastorer, errors.New(errorMsg)) + + service := datastore.NewService(nil, dsRepo, uuidProvider, nil) + resp, err := service.BackupResource(context.TODO(), backupReq, []models.JobSpec{jobSpec}) + + assert.Contains(t, err.Error(), errorMsg) + assert.Nil(t, resp) + }) + t.Run("should return error when unable to get resource", func(t *testing.T) { + execUnit := new(mock.BasePlugin) + defer execUnit.AssertExpectations(t) + + depMod := new(mock.DependencyResolverMod) + defer depMod.AssertExpectations(t) + + datastorer := new(mock.Datastorer) + defer datastorer.AssertExpectations(t) + + dsRepo := new(mock.SupportedDatastoreRepo) + defer dsRepo.AssertExpectations(t) + + resourceRepo := new(mock.ResourceSpecRepository) + defer resourceRepo.AssertExpectations(t) + + resourceRepoFac := new(mock.ResourceSpecRepoFactory) + defer resourceRepoFac.AssertExpectations(t) + + uuidProvider := new(mock.UUIDProvider) + defer uuidProvider.AssertExpectations(t) + + jobTask.Unit = &models.Plugin{Base: execUnit, DependencyMod: depMod} + jobSpec := models.JobSpec{ + ID: uuid.Must(uuid.NewRandom()), + Name: "job-1", + Task: jobTask, + Assets: jobAssets, + } + + unitData := models.GenerateDestinationRequest{ + Config: models.PluginConfigs{}.FromJobSpec(jobSpec.Task.Config), + Assets: models.PluginAssets{}.FromJobSpec(jobSpec.Assets), + } + backupReq := models.BackupRequest{ + Project: projectSpec, + Namespace: namespaceSpec, + IgnoreDownstream: false, + } + + uuidProvider.On("NewUUID").Return(backupUUID, nil) + depMod.On("GenerateDestination", context.TODO(), unitData).Return(destination, nil) + dsRepo.On("GetByName", models.DestinationTypeBigquery.String()).Return(datastorer, nil) + resourceRepoFac.On("New", namespaceSpec, datastorer).Return(resourceRepo) + + errorMsg := "unable to get resource" + resourceRepo.On("GetByURN", 
destination.URN()).Return(models.ResourceSpec{}, errors.New(errorMsg)) + + service := datastore.NewService(resourceRepoFac, dsRepo, uuidProvider, nil) + resp, err := service.BackupResource(context.TODO(), backupReq, []models.JobSpec{jobSpec}) + + assert.Equal(t, errorMsg, err.Error()) + assert.Nil(t, resp) + }) + t.Run("should return error when unable to do backup", func(t *testing.T) { + execUnit := new(mock.BasePlugin) + defer execUnit.AssertExpectations(t) + + depMod := new(mock.DependencyResolverMod) + defer depMod.AssertExpectations(t) + + datastorer := new(mock.Datastorer) + defer datastorer.AssertExpectations(t) + + dsRepo := new(mock.SupportedDatastoreRepo) + defer dsRepo.AssertExpectations(t) + + resourceRepo := new(mock.ResourceSpecRepository) + defer resourceRepo.AssertExpectations(t) + + resourceRepoFac := new(mock.ResourceSpecRepoFactory) + defer resourceRepoFac.AssertExpectations(t) + + uuidProvider := new(mock.UUIDProvider) + defer uuidProvider.AssertExpectations(t) + + jobTask.Unit = &models.Plugin{Base: execUnit, DependencyMod: depMod} + jobSpec := models.JobSpec{ + ID: uuid.Must(uuid.NewRandom()), + Name: "job-1", + Task: jobTask, + Assets: jobAssets, + } + unitData := models.GenerateDestinationRequest{ + Config: models.PluginConfigs{}.FromJobSpec(jobSpec.Task.Config), + Assets: models.PluginAssets{}.FromJobSpec(jobSpec.Assets), + } + resourceSpec := models.ResourceSpec{ + Version: 1, + Name: "project.dataset.table", + Type: models.ResourceTypeTable, + Datastore: datastorer, + } + backupReq := models.BackupRequest{ + ID: backupUUID, + ResourceName: resourceSpec.Name, + Project: projectSpec, + Namespace: namespaceSpec, + IgnoreDownstream: false, + DryRun: false, + } + backupResourceReq := models.BackupResourceRequest{ + Resource: resourceSpec, + BackupSpec: backupReq, + } + + uuidProvider.On("NewUUID").Return(backupUUID, nil) + depMod.On("GenerateDestination", context.TODO(), unitData).Return(destination, nil) + dsRepo.On("GetByName", 
models.DestinationTypeBigquery.String()).Return(datastorer, nil) + resourceRepo.On("GetByURN", destination.URN()).Return(resourceSpec, nil) + resourceRepoFac.On("New", namespaceSpec, datastorer).Return(resourceRepo) + + errorMsg := "unable to do backup" + datastorer.On("BackupResource", context.TODO(), backupResourceReq).Return(models.BackupResourceResponse{}, errors.New(errorMsg)) + + service := datastore.NewService(resourceRepoFac, dsRepo, uuidProvider, nil) + resp, err := service.BackupResource(context.TODO(), backupReq, []models.JobSpec{jobSpec}) + + assert.Equal(t, errorMsg, err.Error()) + assert.Nil(t, resp) + }) + t.Run("should return error when unable to generate destination for downstream", func(t *testing.T) { + execUnit := new(mock.BasePlugin) + defer execUnit.AssertExpectations(t) + + depMod := new(mock.DependencyResolverMod) + defer depMod.AssertExpectations(t) + + datastorer := new(mock.Datastorer) + defer datastorer.AssertExpectations(t) + + dsRepo := new(mock.SupportedDatastoreRepo) + defer dsRepo.AssertExpectations(t) + + resourceRepo := new(mock.ResourceSpecRepository) + defer resourceRepo.AssertExpectations(t) + + resourceRepoFac := new(mock.ResourceSpecRepoFactory) + defer resourceRepoFac.AssertExpectations(t) + + uuidProvider := new(mock.UUIDProvider) + defer uuidProvider.AssertExpectations(t) + + jobTask.Unit = &models.Plugin{Base: execUnit, DependencyMod: depMod} + jobDownstream := models.JobSpec{ + ID: uuid.Must(uuid.NewRandom()), + Name: "job-2", + Task: jobTask, + Assets: jobAssets, + } + dependencies := make(map[string]models.JobSpecDependency) + dependencies[jobDownstream.GetName()] = models.JobSpecDependency{ + Job: &jobDownstream, + } + jobRoot := models.JobSpec{ + ID: uuid.Must(uuid.NewRandom()), + Name: "job-1", + Task: jobTask, + Assets: jobAssets, + Dependencies: dependencies, + } + unitRoot := models.GenerateDestinationRequest{ + Config: models.PluginConfigs{}.FromJobSpec(jobRoot.Task.Config), + Assets: 
models.PluginAssets{}.FromJobSpec(jobRoot.Assets), + } + destinationRoot := &models.GenerateDestinationResponse{ + Destination: "project.dataset.table", + Type: models.DestinationTypeBigquery, + } + resourceRoot := models.ResourceSpec{ + Version: 1, + Name: "project.dataset.root", + Type: models.ResourceTypeTable, + Datastore: datastorer, + } + backupReq := models.BackupRequest{ + ID: backupUUID, + ResourceName: resourceRoot.Name, + Project: projectSpec, + Namespace: namespaceSpec, + IgnoreDownstream: false, + DryRun: true, + } + backupResourceReqRoot := models.BackupResourceRequest{ + Resource: resourceRoot, + BackupSpec: backupReq, + } + unitDownstream := models.GenerateDestinationRequest{ + Config: models.PluginConfigs{}.FromJobSpec(jobDownstream.Task.Config), + Assets: models.PluginAssets{}.FromJobSpec(jobDownstream.Assets), + } + + uuidProvider.On("NewUUID").Return(backupUUID, nil) + depMod.On("GenerateDestination", context.TODO(), unitRoot).Return(destinationRoot, nil).Once() + dsRepo.On("GetByName", models.DestinationTypeBigquery.String()).Return(datastorer, nil) + resourceRepoFac.On("New", namespaceSpec, datastorer).Return(resourceRepo) + resourceRepo.On("GetByURN", destinationRoot.URN()).Return(resourceRoot, nil).Once() + datastorer.On("BackupResource", context.TODO(), backupResourceReqRoot).Return(models.BackupResourceResponse{}, nil).Once() + + errorMsg := "unable to generate destination" + depMod.On("GenerateDestination", context.TODO(), unitDownstream).Return(&models.GenerateDestinationResponse{}, errors.New(errorMsg)).Once() + + service := datastore.NewService(resourceRepoFac, dsRepo, uuidProvider, nil) + resp, err := service.BackupResource(context.TODO(), backupReq, []models.JobSpec{jobRoot, jobDownstream}) + + assert.Equal(t, errorMsg, err.Error()) + assert.Nil(t, resp) + }) + t.Run("should not return error when one of the resources is not found", func(t *testing.T) { + execUnit := new(mock.BasePlugin) + defer execUnit.AssertExpectations(t) + + 
depMod := new(mock.DependencyResolverMod) + defer depMod.AssertExpectations(t) + + datastorer := new(mock.Datastorer) + defer datastorer.AssertExpectations(t) + + dsRepo := new(mock.SupportedDatastoreRepo) + defer dsRepo.AssertExpectations(t) + + resourceRepo := new(mock.ResourceSpecRepository) + defer resourceRepo.AssertExpectations(t) + + resourceRepoFac := new(mock.ResourceSpecRepoFactory) + defer resourceRepoFac.AssertExpectations(t) + + uuidProvider := new(mock.UUIDProvider) + defer uuidProvider.AssertExpectations(t) + + backupRepo := new(mock.BackupRepo) + defer backupRepo.AssertExpectations(t) + + backupRepoFac := new(mock.BackupRepoFactory) + defer backupRepoFac.AssertExpectations(t) + + jobTask.Unit = &models.Plugin{Base: execUnit, DependencyMod: depMod} + jobDownstream := models.JobSpec{ + ID: uuid.Must(uuid.NewRandom()), + Name: "job-2", + Task: jobTask, + Assets: jobAssets, + } + dependencies := make(map[string]models.JobSpecDependency) + dependencies[jobDownstream.GetName()] = models.JobSpecDependency{ + Job: &jobDownstream, + } + jobRoot := models.JobSpec{ + ID: uuid.Must(uuid.NewRandom()), + Name: "job-1", + Task: jobTask, + Assets: jobAssets, + Dependencies: dependencies, + } + + unitRoot := models.GenerateDestinationRequest{ + Config: models.PluginConfigs{}.FromJobSpec(jobRoot.Task.Config), + Assets: models.PluginAssets{}.FromJobSpec(jobRoot.Assets), + } + destinationRoot := &models.GenerateDestinationResponse{ + Destination: "project.dataset.table", + Type: models.DestinationTypeBigquery, + } + resourceRoot := models.ResourceSpec{ + Version: 1, + Name: "project.dataset.root", + Type: models.ResourceTypeTable, + Datastore: datastorer, + } + backupReq := models.BackupRequest{ + ID: backupUUID, + ResourceName: resourceRoot.Name, + Project: projectSpec, + Namespace: namespaceSpec, + IgnoreDownstream: false, + DryRun: true, + } + backupResourceReqRoot := models.BackupResourceRequest{ + Resource: resourceRoot, + BackupSpec: backupReq, + } + 
resultURNRoot := "store://optimus_backup:backupURNRoot" + resultSpecRoot := map[string]interface{}{ + "project": projectSpec.Name, "location": "optimus_backup", "name": "backup_resource_root", + } + backupResultRoot := models.BackupResult{ + URN: resultURNRoot, + Spec: resultSpecRoot, + } + + unitDownstream := models.GenerateDestinationRequest{ + Config: models.PluginConfigs{}.FromJobSpec(jobDownstream.Task.Config), + Assets: models.PluginAssets{}.FromJobSpec(jobDownstream.Assets), + } + destinationDownstream := &models.GenerateDestinationResponse{ + Destination: "project.dataset.downstream", + Type: models.DestinationTypeBigquery, + } + + backupResult := map[string]interface{}{ + destinationRoot.Destination: backupResultRoot, + } + backupSpec := models.BackupSpec{ + ID: backupUUID, + Resource: resourceRoot, + Result: backupResult, + Description: "", + } + + uuidProvider.On("NewUUID").Return(backupUUID, nil) + depMod.On("GenerateDestination", context.TODO(), unitRoot).Return(destinationRoot, nil).Once() + resourceRepoFac.On("New", namespaceSpec, datastorer).Return(resourceRepo) + resourceRepo.On("GetByURN", destinationRoot.URN()).Return(resourceRoot, nil).Once() + dsRepo.On("GetByName", models.DestinationTypeBigquery.String()).Return(datastorer, nil) + datastorer.On("BackupResource", context.TODO(), backupResourceReqRoot). 
+ Return(models.BackupResourceResponse{ResultURN: resultURNRoot, ResultSpec: resultSpecRoot}, nil).Once() + + depMod.On("GenerateDestination", context.TODO(), unitDownstream).Return(destinationDownstream, nil).Once() + resourceRepo.On("GetByURN", destinationDownstream.URN()).Return(models.ResourceSpec{}, store.ErrResourceNotFound).Once() + + backupRepoFac.On("New", projectSpec, datastorer).Return(backupRepo) + backupRepo.On("Save", backupSpec).Return(nil) + + service := datastore.NewService(resourceRepoFac, dsRepo, uuidProvider, backupRepoFac) + resp, err := service.BackupResource(context.TODO(), backupReq, []models.JobSpec{jobRoot, jobDownstream}) + + assert.Nil(t, err) + assert.Equal(t, []string{resultURNRoot}, resp) + }) + t.Run("should not return error when one of the resources is not supported", func(t *testing.T) { + execUnit := new(mock.BasePlugin) + defer execUnit.AssertExpectations(t) + + depMod := new(mock.DependencyResolverMod) + defer depMod.AssertExpectations(t) + + datastorer := new(mock.Datastorer) + defer datastorer.AssertExpectations(t) + + dsRepo := new(mock.SupportedDatastoreRepo) + defer dsRepo.AssertExpectations(t) + + resourceRepo := new(mock.ResourceSpecRepository) + defer resourceRepo.AssertExpectations(t) + + resourceRepoFac := new(mock.ResourceSpecRepoFactory) + defer resourceRepoFac.AssertExpectations(t) + + uuidProvider := new(mock.UUIDProvider) + defer uuidProvider.AssertExpectations(t) + + backupRepo := new(mock.BackupRepo) + defer backupRepo.AssertExpectations(t) + + backupRepoFac := new(mock.BackupRepoFactory) + defer backupRepoFac.AssertExpectations(t) + + jobTask.Unit = &models.Plugin{Base: execUnit, DependencyMod: depMod} + jobDownstream := models.JobSpec{ + ID: uuid.Must(uuid.NewRandom()), + Name: "job-2", + Task: jobTask, + Assets: jobAssets, + } + dependencies := make(map[string]models.JobSpecDependency) + dependencies[jobDownstream.GetName()] = models.JobSpecDependency{ + Job: &jobDownstream, + } + jobRoot := models.JobSpec{ + 
ID: uuid.Must(uuid.NewRandom()), + Name: "job-1", + Task: jobTask, + Assets: jobAssets, + Dependencies: dependencies, + } + unitRoot := models.GenerateDestinationRequest{ + Config: models.PluginConfigs{}.FromJobSpec(jobRoot.Task.Config), + Assets: models.PluginAssets{}.FromJobSpec(jobRoot.Assets), + } + destinationRoot := &models.GenerateDestinationResponse{ + Destination: "project.dataset.table", + Type: models.DestinationTypeBigquery, + } + resourceRoot := models.ResourceSpec{ + Version: 1, + Name: "project.dataset.root", + Type: models.ResourceTypeTable, + Datastore: datastorer, + } + backupReq := models.BackupRequest{ + ID: backupUUID, + ResourceName: resourceRoot.Name, + Project: projectSpec, + Namespace: namespaceSpec, + IgnoreDownstream: false, + DryRun: true, + } + backupResourceReqRoot := models.BackupResourceRequest{ + Resource: resourceRoot, + BackupSpec: backupReq, + } + resultURNRoot := "store://optimus_backup:backupURNRoot" + resultSpecRoot := map[string]interface{}{ + "project": projectSpec.Name, "location": "optimus_backup", "name": "backup_resource_root", + } + backupResultRoot := models.BackupResult{ + URN: resultURNRoot, + Spec: resultSpecRoot, + } + + unitDownstream := models.GenerateDestinationRequest{ + Config: models.PluginConfigs{}.FromJobSpec(jobDownstream.Task.Config), + Assets: models.PluginAssets{}.FromJobSpec(jobDownstream.Assets), + } + destinationDownstream := &models.GenerateDestinationResponse{ + Destination: "project.dataset.downstream", + Type: models.DestinationTypeBigquery, + } + resourceDownstream := models.ResourceSpec{ + Version: 1, + Name: "project.dataset.downstream", + Type: models.ResourceTypeTable, + Datastore: datastorer, + } + backupResourceReqDownstream := models.BackupResourceRequest{ + Resource: resourceDownstream, + BackupSpec: backupReq, + } + + backupResult := map[string]interface{}{ + destinationRoot.Destination: backupResultRoot, + } + backupSpec := models.BackupSpec{ + ID: backupUUID, + Resource: resourceRoot, 
+ Result: backupResult, + Description: "", + } + + uuidProvider.On("NewUUID").Return(backupUUID, nil) + depMod.On("GenerateDestination", context.TODO(), unitRoot).Return(destinationRoot, nil).Once() + resourceRepoFac.On("New", namespaceSpec, datastorer).Return(resourceRepo) + resourceRepo.On("GetByURN", destinationRoot.URN()).Return(resourceRoot, nil).Once() + dsRepo.On("GetByName", models.DestinationTypeBigquery.String()).Return(datastorer, nil) + datastorer.On("BackupResource", context.TODO(), backupResourceReqRoot). + Return(models.BackupResourceResponse{ResultURN: resultURNRoot, ResultSpec: resultSpecRoot}, nil).Once() + + depMod.On("GenerateDestination", context.TODO(), unitDownstream).Return(destinationDownstream, nil).Once() + resourceRepo.On("GetByURN", destinationDownstream.URN()).Return(resourceDownstream, nil).Once() + datastorer.On("BackupResource", context.TODO(), backupResourceReqDownstream).Return(models.BackupResourceResponse{}, models.ErrUnsupportedResource).Once() + + backupRepoFac.On("New", projectSpec, datastorer).Return(backupRepo) + backupRepo.On("Save", backupSpec).Return(nil) + + service := datastore.NewService(resourceRepoFac, dsRepo, uuidProvider, backupRepoFac) + resp, err := service.BackupResource(context.TODO(), backupReq, []models.JobSpec{jobRoot, jobDownstream}) + + assert.Nil(t, err) + assert.Equal(t, []string{resultURNRoot}, resp) + }) + }) + t.Run("ListBackupResources", func(t *testing.T) { + datastoreName := models.DestinationTypeBigquery.String() + backupSpecs := []models.BackupSpec{ + { + ID: uuid.Must(uuid.NewRandom()), + CreatedAt: time.Now().Add(time.Hour * 24 * -30), + }, + { + ID: uuid.Must(uuid.NewRandom()), + CreatedAt: time.Now().Add(time.Hour * 24 * -50), + }, + { + ID: uuid.Must(uuid.NewRandom()), + CreatedAt: time.Now().Add(time.Hour * 24 * -100), + }, + } + t.Run("should return list of recent backups", func(t *testing.T) { + datastorer := new(mock.Datastorer) + defer datastorer.AssertExpectations(t) + + dsRepo := 
new(mock.SupportedDatastoreRepo) + defer dsRepo.AssertExpectations(t) + + backupRepo := new(mock.BackupRepo) + defer backupRepo.AssertExpectations(t) + + backupRepoFac := new(mock.BackupRepoFactory) + defer backupRepoFac.AssertExpectations(t) + + dsRepo.On("GetByName", datastoreName).Return(datastorer, nil) + backupRepoFac.On("New", projectSpec, datastorer).Return(backupRepo) + backupRepo.On("GetAll").Return(backupSpecs, nil) + + service := datastore.NewService(nil, dsRepo, nil, backupRepoFac) + resp, err := service.ListBackupResources(projectSpec, datastoreName) + + assert.Nil(t, err) + assert.Equal(t, []models.BackupSpec{backupSpecs[0], backupSpecs[1]}, resp) + }) + t.Run("should fail when unable to get datastore", func(t *testing.T) { + datastorer := new(mock.Datastorer) + defer datastorer.AssertExpectations(t) + + dsRepo := new(mock.SupportedDatastoreRepo) + defer dsRepo.AssertExpectations(t) + + errorMsg := "unable to get datastore" + dsRepo.On("GetByName", datastoreName).Return(datastorer, errors.New(errorMsg)) + + service := datastore.NewService(nil, dsRepo, nil, nil) + resp, err := service.ListBackupResources(projectSpec, datastoreName) + + assert.Equal(t, errorMsg, err.Error()) + assert.Equal(t, []models.BackupSpec{}, resp) + }) + t.Run("should fail when unable to get backups", func(t *testing.T) { + datastorer := new(mock.Datastorer) + defer datastorer.AssertExpectations(t) + + dsRepo := new(mock.SupportedDatastoreRepo) + defer dsRepo.AssertExpectations(t) + + backupRepo := new(mock.BackupRepo) + defer backupRepo.AssertExpectations(t) + + backupRepoFac := new(mock.BackupRepoFactory) + defer backupRepoFac.AssertExpectations(t) + + dsRepo.On("GetByName", datastoreName).Return(datastorer, nil) + backupRepoFac.On("New", projectSpec, datastorer).Return(backupRepo) + + errorMsg := "unable to get backups" + backupRepo.On("GetAll").Return([]models.BackupSpec{}, errors.New(errorMsg)) + + service := datastore.NewService(nil, dsRepo, nil, backupRepoFac) + resp, err 
:= service.ListBackupResources(projectSpec, datastoreName) + + assert.Equal(t, errorMsg, err.Error()) + assert.Equal(t, []models.BackupSpec{}, resp) + }) + t.Run("should not return error when no backups are found", func(t *testing.T) { + datastorer := new(mock.Datastorer) + defer datastorer.AssertExpectations(t) + + dsRepo := new(mock.SupportedDatastoreRepo) + defer dsRepo.AssertExpectations(t) + + backupRepo := new(mock.BackupRepo) + defer backupRepo.AssertExpectations(t) + + backupRepoFac := new(mock.BackupRepoFactory) + defer backupRepoFac.AssertExpectations(t) + + dsRepo.On("GetByName", datastoreName).Return(datastorer, nil) + backupRepoFac.On("New", projectSpec, datastorer).Return(backupRepo) + backupRepo.On("GetAll").Return([]models.BackupSpec{}, store.ErrResourceNotFound) + + service := datastore.NewService(nil, dsRepo, nil, backupRepoFac) + resp, err := service.ListBackupResources(projectSpec, datastoreName) + + assert.Nil(t, err) + assert.Equal(t, []models.BackupSpec{}, resp) + }) + t.Run("should not return error when no recent backups are found", func(t *testing.T) { + datastorer := new(mock.Datastorer) + defer datastorer.AssertExpectations(t) + + dsRepo := new(mock.SupportedDatastoreRepo) + defer dsRepo.AssertExpectations(t) + + backupRepo := new(mock.BackupRepo) + defer backupRepo.AssertExpectations(t) + + backupRepoFac := new(mock.BackupRepoFactory) + defer backupRepoFac.AssertExpectations(t) + + dsRepo.On("GetByName", datastoreName).Return(datastorer, nil) + backupRepoFac.On("New", projectSpec, datastorer).Return(backupRepo) + backupRepo.On("GetAll").Return([]models.BackupSpec{backupSpecs[2]}, nil) + + service := datastore.NewService(nil, dsRepo, nil, backupRepoFac) + resp, err := service.ListBackupResources(projectSpec, datastoreName) + + assert.Nil(t, err) + assert.Equal(t, 0, len(resp)) }) }) } diff --git a/ext/datastore/bigquery/bigquery.go b/ext/datastore/bigquery/bigquery.go index c8ffc88d96..dae0742a67 100644 --- 
a/ext/datastore/bigquery/bigquery.go +++ b/ext/datastore/bigquery/bigquery.go @@ -157,11 +157,26 @@ func (b *BigQuery) DeleteResource(ctx context.Context, request models.DeleteReso return fmt.Errorf("unsupported resource type %s", request.Resource.Type) } -func (b *BigQuery) BackupResource(ctx context.Context, request models.BackupResourceRequest) error { +func (b *BigQuery) BackupResource(ctx context.Context, request models.BackupResourceRequest) (models.BackupResourceResponse, error) { if request.Resource.Type != models.ResourceTypeTable { - return models.ErrUnsupportedResource + return models.BackupResourceResponse{}, models.ErrUnsupportedResource } - return nil + + if request.BackupSpec.DryRun { + return models.BackupResourceResponse{}, nil + } + + svcAcc, ok := request.BackupSpec.Project.Secret.GetByName(SecretName) + if !ok || len(svcAcc) == 0 { + return models.BackupResourceResponse{}, errors.Errorf(errSecretNotFoundStr, SecretName, b.Name()) + } + + client, err := b.ClientFac.New(ctx, svcAcc) + if err != nil { + return models.BackupResourceResponse{}, err + } + + return backupTable(ctx, request, client) } func init() { diff --git a/ext/datastore/bigquery/bigquery_test.go b/ext/datastore/bigquery/bigquery_test.go index 4234538445..3b61adf454 100644 --- a/ext/datastore/bigquery/bigquery_test.go +++ b/ext/datastore/bigquery/bigquery_test.go @@ -3,7 +3,13 @@ package bigquery import ( "context" "errors" + "fmt" "testing" + "time" + + "cloud.google.com/go/bigquery" + "github.com/google/uuid" + "github.com/googleapis/google-cloud-go-testing/bigquery/bqiface" "github.com/odpf/optimus/models" "github.com/stretchr/testify/assert" @@ -450,6 +456,122 @@ func TestBigquery(t *testing.T) { }) t.Run("BackupResource", func(t *testing.T) { t.Run("should not return error when resource supported", func(t *testing.T) { + spec := BQTable{ + Project: "project", + Dataset: "dataset", + Table: "table", + } + eTag := "unique ID" + tableMetadata := &bigquery.TableMetadata{ + Name: 
spec.Table, + Schema: bigquery.Schema{ + { + Name: "message", + Type: "STRING", + }, + { + Name: "message_type", + Type: "STRING", + }, + { + Name: "recipient", + Type: "STRING", + Repeated: true, + }, + { + Name: "time", + Type: "TIME", + }, + }, + Clustering: &bigquery.Clustering{ + Fields: []string{"message_type"}, + }, + ETag: eTag, + } + resourceSpec := models.ResourceSpec{ + Name: "project:dataset.table", + Spec: spec, + Type: models.ResourceTypeTable, + } + backupTime := time.Now() + resourceRequest := models.BackupResourceRequest{ + Resource: resourceSpec, + BackupSpec: models.BackupRequest{ + Project: projectSpec, + Config: map[string]string{ + BackupConfigTTL: "720h", + BackupConfigDataset: "optimus_backup", + BackupConfigPrefix: "backup", + }, + ID: uuid.Must(uuid.NewRandom()), + }, + BackupTime: backupTime, + } + + destinationTable := BQTable{ + Project: spec.Project, + Dataset: resourceRequest.BackupSpec.Config[BackupConfigDataset], + Table: fmt.Sprintf("backup_dataset_table_%s", resourceRequest.BackupSpec.ID), + } + resultURN := fmt.Sprintf(tableURNFormat, BigQuery{}.Name(), destinationTable.Project, destinationTable.Dataset, destinationTable.Table) + + datasetMetadata := bqiface.DatasetMetadata{ + DatasetMetadata: bigquery.DatasetMetadata{ + ETag: eTag, + }, + } + + toUpdate := bigquery.TableMetadataToUpdate{ + ExpirationTime: resourceRequest.BackupTime.Add(time.Hour * 24 * 30), + } + + bQClient := new(BqClientMock) + defer bQClient.AssertExpectations(t) + + bQClientFactory := new(BQClientFactoryMock) + defer bQClientFactory.AssertExpectations(t) + + bQDatasetHandle := new(BqDatasetMock) + defer bQDatasetHandle.AssertExpectations(t) + + bQTable := new(BqTableMock) + defer bQTable.AssertExpectations(t) + + bQCopier := new(BqCopierMock) + defer bQCopier.AssertExpectations(t) + + bQJob := new(BqJobMock) + defer bQJob.AssertExpectations(t) + + bQClientFactory.On("New", testingContext, secret).Return(bQClient, nil) + + //duplicate table + 
bQClient.On("DatasetInProject", spec.Project, spec.Dataset).Return(bQDatasetHandle).Once() + bQClient.On("DatasetInProject", destinationTable.Project, destinationTable.Dataset).Return(bQDatasetHandle).Once() + bQDatasetHandle.On("Metadata", testingContext).Return(&datasetMetadata, nil) + bQDatasetHandle.On("Table", spec.Table).Return(bQTable) + bQDatasetHandle.On("Table", destinationTable.Table).Return(bQTable) + bQTable.On("CopierFrom", []bqiface.Table{bQTable}).Return(bQCopier) + bQCopier.On("Run", testingContext).Return(bQJob, nil) + bQJob.On("Wait", testingContext).Return(&bigquery.JobStatus{}, nil) + + //update expiry + bQTable.On("Metadata", testingContext).Return(tableMetadata, nil).Once() + bQTable.On("Update", testingContext, toUpdate, eTag).Return(tableMetadata, nil) + + //verify + bQTable.On("Metadata", testingContext).Return(tableMetadata, nil).Once() + bq := BigQuery{ + ClientFac: bQClientFactory, + } + + resp, err := bq.BackupResource(testingContext, resourceRequest) + + assert.Nil(t, err) + assert.Equal(t, resultURN, resp.ResultURN) + assert.Equal(t, destinationTable, resp.ResultSpec) + }) + t.Run("should return error when resource is not supported", func(t *testing.T) { resourceSpec := models.ResourceSpec{ Name: "project:dataset.table", Spec: BQTable{ @@ -457,19 +579,19 @@ func TestBigquery(t *testing.T) { Dataset: "dataset", Table: "table", }, - Type: models.ResourceTypeTable, + Type: models.ResourceTypeView, } resourceRequest := models.BackupResourceRequest{ Resource: resourceSpec, - Project: projectSpec, } bq := BigQuery{} - err := bq.BackupResource(testingContext, resourceRequest) + resp, err := bq.BackupResource(testingContext, resourceRequest) - assert.Nil(t, err) + assert.Equal(t, models.ErrUnsupportedResource, err) + assert.Equal(t, models.BackupResourceResponse{}, resp) }) - t.Run("should return error when resource is not supported", func(t *testing.T) { + t.Run("should return error when datastore secret is not available", func(t 
*testing.T) { resourceSpec := models.ResourceSpec{ Name: "project:dataset.table", Spec: BQTable{ @@ -477,17 +599,64 @@ func TestBigquery(t *testing.T) { Dataset: "dataset", Table: "table", }, - Type: models.ResourceTypeView, + Type: models.ResourceTypeTable, } resourceRequest := models.BackupResourceRequest{ Resource: resourceSpec, - Project: projectSpec, + BackupSpec: models.BackupRequest{ + Project: models.ProjectSpec{ + Secret: models.ProjectSecrets{{ + Name: "other_secret", + Value: secret, + }}, + }, + }, } bq := BigQuery{} - err := bq.BackupResource(testingContext, resourceRequest) + resp, err := bq.BackupResource(testingContext, resourceRequest) - assert.Equal(t, models.ErrUnsupportedResource, err) + assert.Equal(t, fmt.Sprintf(errSecretNotFoundStr, SecretName, bq.Name()), err.Error()) + assert.Equal(t, models.BackupResourceResponse{}, resp) + }) + t.Run("should return error when unable to create bq client", func(t *testing.T) { + resourceSpec := models.ResourceSpec{ + Name: "project:dataset.table", + Spec: BQTable{ + Project: "project", + Dataset: "dataset", + Table: "table", + }, + Type: models.ResourceTypeTable, + } + resourceRequest := models.BackupResourceRequest{ + Resource: resourceSpec, + BackupSpec: models.BackupRequest{ + Project: models.ProjectSpec{ + Secret: models.ProjectSecrets{{ + Name: SecretName, + Value: secret, + }}, + }, + }, + } + + bQClient := new(BqClientMock) + defer bQClient.AssertExpectations(t) + + bQClientFactory := new(BQClientFactoryMock) + defer bQClientFactory.AssertExpectations(t) + + errorMsg := "bq client failed" + bQClientFactory.On("New", testingContext, secret).Return(bQClient, errors.New(errorMsg)) + + bq := BigQuery{ + ClientFac: bQClientFactory, + } + resp, err := bq.BackupResource(testingContext, resourceRequest) + + assert.Equal(t, errorMsg, err.Error()) + assert.Equal(t, models.BackupResourceResponse{}, resp) }) }) } diff --git a/ext/datastore/bigquery/mock.go b/ext/datastore/bigquery/mock.go index 
ba9358bdab..536fde14b9 100644 --- a/ext/datastore/bigquery/mock.go +++ b/ext/datastore/bigquery/mock.go @@ -189,3 +189,59 @@ func (b *BigQueryMock) ReadResource(ctx context.Context, request models.ReadReso func (b *BigQueryMock) DeleteResource(ctx context.Context, request models.DeleteResourceRequest) error { panic("not implemented") } + +type BqCopierMock struct { + mock.Mock + bqiface.Copier +} + +func (copier *BqCopierMock) JobIDConfig() *bigquery.JobIDConfig { + panic("not implemented") +} + +func (copier *BqCopierMock) SetCopyConfig(c bqiface.CopyConfig) { + panic("not implemented") +} + +func (copier *BqCopierMock) Run(ctx context.Context) (bqiface.Job, error) { + args := copier.Called(ctx) + return args.Get(0).(bqiface.Job), args.Error(1) +} + +type BqJobMock struct { + mock.Mock + bqiface.Job +} + +func (job *BqJobMock) ID() string { + panic("not implemented") +} + +func (job *BqJobMock) Location() string { + panic("not implemented") +} + +func (job *BqJobMock) Config() (bigquery.JobConfig, error) { + panic("not implemented") +} + +func (job *BqJobMock) Status(ctx context.Context) (*bigquery.JobStatus, error) { + panic("not implemented") +} + +func (job *BqJobMock) LastStatus() *bigquery.JobStatus { + panic("not implemented") +} + +func (job *BqJobMock) Cancel(ctx context.Context) error { + panic("not implemented") +} + +func (job *BqJobMock) Wait(ctx context.Context) (*bigquery.JobStatus, error) { + args := job.Called(ctx) + return args.Get(0).(*bigquery.JobStatus), args.Error(1) +} + +func (job *BqJobMock) Read(ctx context.Context) (bqiface.RowIterator, error) { + panic("not implemented") +} diff --git a/ext/datastore/bigquery/table.go b/ext/datastore/bigquery/table.go index b87eaf2c97..68dea51d8b 100644 --- a/ext/datastore/bigquery/table.go +++ b/ext/datastore/bigquery/table.go @@ -2,8 +2,12 @@ package bigquery import ( "context" + "fmt" "net/http" "regexp" + "time" + + "cloud.google.com/go/bigquery" 
"github.com/googleapis/google-cloud-go-testing/bigquery/bqiface" "github.com/odpf/optimus/models" @@ -13,12 +17,22 @@ import ( var ( tableNameParseRegex = regexp.MustCompile(`^([\w-]+)\.(\w+)\.([\w-]+)$`) + errorReadTableSpec = "failed to read table spec for bigquery" +) + +const ( + BackupConfigDataset = "dataset" + BackupConfigPrefix = "prefix" + BackupConfigTTL = "ttl" + defaultBackupDataset = "optimus_backup" + defaultBackupPrefix = "backup" + defaultBackupTTL = time.Hour * 720 ) func createTable(ctx context.Context, spec models.ResourceSpec, client bqiface.Client, upsert bool) error { bqResource, ok := spec.Spec.(BQTable) if !ok { - return errors.New("failed to read table spec for bigquery") + return errors.New(errorReadTableSpec) } // inherit from base @@ -67,7 +81,7 @@ func getTable(ctx context.Context, resourceSpec models.ResourceSpec, client bqif var bqResource BQTable bqResource, ok := resourceSpec.Spec.(BQTable) if !ok { - return models.ResourceSpec{}, errors.New("failed to read table spec for bigquery") + return models.ResourceSpec{}, errors.New(errorReadTableSpec) } dataset := client.DatasetInProject(bqResource.Project, bqResource.Dataset) @@ -114,7 +128,7 @@ func getTable(ctx context.Context, resourceSpec models.ResourceSpec, client bqif func deleteTable(ctx context.Context, resourceSpec models.ResourceSpec, client bqiface.Client) error { bqTable, ok := resourceSpec.Spec.(BQTable) if !ok { - return errors.New("failed to read table spec for bigquery") + return errors.New(errorReadTableSpec) } dataset := client.DatasetInProject(bqTable.Project, bqTable.Dataset) if _, err := dataset.Metadata(ctx); err != nil { @@ -124,3 +138,115 @@ func deleteTable(ctx context.Context, resourceSpec models.ResourceSpec, client b table := dataset.Table(bqTable.Table) return table.Delete(ctx) } + +func backupTable(ctx context.Context, request models.BackupResourceRequest, client bqiface.Client) (models.BackupResourceResponse, error) { + bqResourceSrc, ok := 
request.Resource.Spec.(BQTable) + if !ok { + return models.BackupResourceResponse{}, errors.New(errorReadTableSpec) + } + + bqResourceDst := prepareBQResourceDst(bqResourceSrc, request.BackupSpec) + + tableDst, err := duplicateTable(ctx, client, bqResourceSrc, bqResourceDst) + if err != nil { + return models.BackupResourceResponse{}, err + } + + tableDst, err = updateExpiry(ctx, tableDst, request) + if err != nil { + return models.BackupResourceResponse{}, err + } + + if err := ensureTable(ctx, tableDst, bqResourceDst, false); err != nil { + return models.BackupResourceResponse{}, err + } + + resultURN, err := tableSpec{}.GenerateURN(bqResourceDst) + if err != nil { + return models.BackupResourceResponse{}, err + } + + return models.BackupResourceResponse{ + ResultURN: resultURN, + ResultSpec: bqResourceDst, + }, nil +} + +func prepareBQResourceDst(bqResourceSrc BQTable, backupSpec models.BackupRequest) BQTable { + datasetValue, ok := backupSpec.Config[BackupConfigDataset] + if !ok { + datasetValue = defaultBackupDataset + } + + prefixValue, ok := backupSpec.Config[BackupConfigPrefix] + if !ok { + prefixValue = defaultBackupPrefix + } + + return BQTable{ + Project: bqResourceSrc.Project, + Dataset: datasetValue, + Table: fmt.Sprintf("%s_%s_%s_%s", prefixValue, bqResourceSrc.Dataset, bqResourceSrc.Table, backupSpec.ID), + } +} + +func duplicateTable(ctx context.Context, client bqiface.Client, bqResourceSrc BQTable, bqResourceDst BQTable) (bqiface.Table, error) { + // make sure dataset is present + datasetDst := client.DatasetInProject(bqResourceDst.Project, bqResourceDst.Dataset) + if err := ensureDataset(ctx, datasetDst, BQDataset{ + Project: bqResourceSrc.Project, + Dataset: bqResourceSrc.Dataset, + Metadata: BQDatasetMetadata{}, + }, false); err != nil { + return nil, err + } + + datasetSrc := client.DatasetInProject(bqResourceSrc.Project, bqResourceSrc.Dataset) + if _, err := datasetSrc.Metadata(ctx); err != nil { + return nil, err + } + + // duplicate table + 
tableSrc := datasetSrc.Table(bqResourceSrc.Table) + tableDst := datasetDst.Table(bqResourceDst.Table) + + copier := tableDst.CopierFrom(tableSrc) + job, err := copier.Run(ctx) + if err != nil { + return nil, err + } + status, err := job.Wait(ctx) + if err != nil { + return nil, err + } + if err := status.Err(); err != nil { + return nil, err + } + return tableDst, nil +} + +func updateExpiry(ctx context.Context, tableDst bqiface.Table, req models.BackupResourceRequest) (bqiface.Table, error) { + meta, err := tableDst.Metadata(ctx) + if err != nil { + return nil, err + } + + var ttl time.Duration + ttlStr, ok := req.BackupSpec.Config[BackupConfigTTL] + if ok { + ttl, err = time.ParseDuration(ttlStr) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse bigquery backup TTL %s", ttlStr) + } + } else { + ttl = defaultBackupTTL + } + + update := bigquery.TableMetadataToUpdate{ + ExpirationTime: req.BackupTime.Add(ttl), + } + if _, err = tableDst.Update(ctx, update, meta.ETag); err != nil { + return nil, err + } + return tableDst, nil +} diff --git a/ext/datastore/bigquery/table_test.go b/ext/datastore/bigquery/table_test.go index 24ca31ac06..32037727da 100644 --- a/ext/datastore/bigquery/table_test.go +++ b/ext/datastore/bigquery/table_test.go @@ -2,7 +2,11 @@ package bigquery import ( "context" + "fmt" "testing" + "time" + + "github.com/google/uuid" "cloud.google.com/go/bigquery" "github.com/googleapis/google-cloud-go-testing/bigquery/bqiface" @@ -353,4 +357,347 @@ func TestTable(t *testing.T) { assert.NotNil(t, err) }) }) + + t.Run("backupTable", func(t *testing.T) { + eTag := "uniqueID" + tableMetadata := &bigquery.TableMetadata{ + Name: bQResource.Table, + Schema: bigquery.Schema{ + { + Name: "message", + Type: "STRING", + }, + { + Name: "message_type", + Type: "STRING", + }, + { + Name: "recipient", + Type: "STRING", + Repeated: true, + }, + { + Name: "time", + Type: "TIME", + }, + }, + Clustering: &bigquery.Clustering{ + Fields: 
[]string{"message_type"}, + }, + ETag: eTag, + } + resourceSpec := models.ResourceSpec{ + Spec: bQResource, + Type: models.ResourceTypeTable, + } + destinationConfig := map[string]string{ + BackupConfigTTL: "720h", + BackupConfigDataset: "optimus_backup", + BackupConfigPrefix: "backup", + } + request := models.BackupResourceRequest{ + Resource: resourceSpec, + BackupSpec: models.BackupRequest{ + ID: uuid.Must(uuid.NewRandom()), + Config: destinationConfig, + }, + BackupTime: time.Now(), + } + destinationTable := BQTable{ + Project: bQResource.Project, + Dataset: request.BackupSpec.Config[BackupConfigDataset], + Table: fmt.Sprintf("backup_dataset_table_%s", request.BackupSpec.ID), + } + + datasetMetadata := bqiface.DatasetMetadata{ + DatasetMetadata: bigquery.DatasetMetadata{ + ETag: eTag, + }, + } + toUpdate := bigquery.TableMetadataToUpdate{ + ExpirationTime: request.BackupTime.Add(time.Hour * 24 * 30), + } + resultURN := fmt.Sprintf(tableURNFormat, BigQuery{}.Name(), destinationTable.Project, destinationTable.Dataset, destinationTable.Table) + t.Run("should able to backup table if given valid input", func(t *testing.T) { + bQClient := new(BqClientMock) + defer bQClient.AssertExpectations(t) + + bQDatasetHandle := new(BqDatasetMock) + defer bQDatasetHandle.AssertExpectations(t) + + bQTable := new(BqTableMock) + defer bQTable.AssertExpectations(t) + + bQCopier := new(BqCopierMock) + defer bQCopier.AssertExpectations(t) + + bQJob := new(BqJobMock) + defer bQJob.AssertExpectations(t) + + //duplicate table + bQClient.On("DatasetInProject", bQResource.Project, bQResource.Dataset).Return(bQDatasetHandle).Once() + bQClient.On("DatasetInProject", destinationTable.Project, destinationTable.Dataset).Return(bQDatasetHandle).Once() + bQDatasetHandle.On("Metadata", testingContext).Return(&datasetMetadata, nil) + bQDatasetHandle.On("Table", bQResource.Table).Return(bQTable) + bQDatasetHandle.On("Table", destinationTable.Table).Return(bQTable) + bQTable.On("CopierFrom", 
[]bqiface.Table{bQTable}).Return(bQCopier) + bQCopier.On("Run", testingContext).Return(bQJob, nil) + bQJob.On("Wait", testingContext).Return(&bigquery.JobStatus{}, nil) + + //update expiry + bQTable.On("Metadata", testingContext).Return(tableMetadata, nil).Once() + bQTable.On("Update", testingContext, toUpdate, eTag).Return(tableMetadata, nil) + + //verify + bQTable.On("Metadata", testingContext).Return(tableMetadata, nil).Once() + + resp, err := backupTable(testingContext, request, bQClient) + + assert.Nil(t, err) + assert.Equal(t, resultURN, resp.ResultURN) + assert.Equal(t, destinationTable, resp.ResultSpec) + }) + t.Run("should fail when unable to read resource spec", func(t *testing.T) { + invalidResourceSpec := models.ResourceSpec{ + Spec: "invalid spec", + Type: models.ResourceTypeTable, + } + invalidRequest := models.BackupResourceRequest{ + Resource: invalidResourceSpec, + BackupSpec: models.BackupRequest{ + ID: uuid.Must(uuid.NewRandom()), + Config: destinationConfig, + }, + BackupTime: time.Now(), + } + + bQClient := new(BqClientMock) + defer bQClient.AssertExpectations(t) + + resp, err := backupTable(testingContext, invalidRequest, bQClient) + + assert.Equal(t, errorReadTableSpec, err.Error()) + assert.Equal(t, models.BackupResourceResponse{}, resp) + }) + t.Run("should fail when destination dataset is not available and cannot be created", func(t *testing.T) { + bQClient := new(BqClientMock) + defer bQClient.AssertExpectations(t) + + bQDatasetHandle := new(BqDatasetMock) + defer bQDatasetHandle.AssertExpectations(t) + + errorMsg := "unable to get dataset metadata" + + //duplicate table + bQClient.On("DatasetInProject", destinationTable.Project, destinationTable.Dataset).Return(bQDatasetHandle).Once() + bQDatasetHandle.On("Metadata", testingContext).Return(&bqiface.DatasetMetadata{}, errors.New(errorMsg)) + resp, err := backupTable(testingContext, request, bQClient) + + assert.Equal(t, errorMsg, err.Error()) + assert.Equal(t, 
models.BackupResourceResponse{}, resp) + }) + t.Run("should fail when unable to get source dataset metadata", func(t *testing.T) { + bQClient := new(BqClientMock) + defer bQClient.AssertExpectations(t) + + bQDatasetHandle := new(BqDatasetMock) + defer bQDatasetHandle.AssertExpectations(t) + + errorMsg := "unable to get dataset metadata" + + //duplicate table + bQClient.On("DatasetInProject", destinationTable.Project, destinationTable.Dataset).Return(bQDatasetHandle).Once() + bQDatasetHandle.On("Metadata", testingContext).Return(&datasetMetadata, nil).Once() + bQClient.On("DatasetInProject", bQResource.Project, bQResource.Dataset).Return(bQDatasetHandle).Once() + bQDatasetHandle.On("Metadata", testingContext).Return(&datasetMetadata, errors.New(errorMsg)).Once() + + resp, err := backupTable(testingContext, request, bQClient) + + assert.Equal(t, errorMsg, err.Error()) + assert.Equal(t, models.BackupResourceResponse{}, resp) + }) + t.Run("should fail when unable to copy source table", func(t *testing.T) { + bQClient := new(BqClientMock) + defer bQClient.AssertExpectations(t) + + bQDatasetHandle := new(BqDatasetMock) + defer bQDatasetHandle.AssertExpectations(t) + + bQTable := new(BqTableMock) + defer bQTable.AssertExpectations(t) + + bQCopier := new(BqCopierMock) + defer bQCopier.AssertExpectations(t) + + bQJob := new(BqJobMock) + defer bQJob.AssertExpectations(t) + + errorMsg := "unable to copy table" + + //duplicate table + bQClient.On("DatasetInProject", destinationTable.Project, destinationTable.Dataset).Return(bQDatasetHandle).Once() + bQDatasetHandle.On("Metadata", testingContext).Return(&datasetMetadata, nil).Once() + bQClient.On("DatasetInProject", bQResource.Project, bQResource.Dataset).Return(bQDatasetHandle).Once() + bQDatasetHandle.On("Metadata", testingContext).Return(&datasetMetadata, nil).Once() + bQDatasetHandle.On("Table", bQResource.Table).Return(bQTable) + bQDatasetHandle.On("Table", destinationTable.Table).Return(bQTable) + bQTable.On("CopierFrom", 
[]bqiface.Table{bQTable}).Return(bQCopier) + bQCopier.On("Run", testingContext).Return(bQJob, errors.New(errorMsg)) + + resp, err := backupTable(testingContext, request, bQClient) + + assert.Equal(t, errorMsg, err.Error()) + assert.Equal(t, models.BackupResourceResponse{}, resp) + }) + t.Run("should fail when unable to get status of copy table process", func(t *testing.T) { + bQClient := new(BqClientMock) + defer bQClient.AssertExpectations(t) + + bQDatasetHandle := new(BqDatasetMock) + defer bQDatasetHandle.AssertExpectations(t) + + bQTable := new(BqTableMock) + defer bQTable.AssertExpectations(t) + + bQCopier := new(BqCopierMock) + defer bQCopier.AssertExpectations(t) + + bQJob := new(BqJobMock) + defer bQJob.AssertExpectations(t) + + errorMsg := "unable to get status of copy table" + + //duplicate table + bQClient.On("DatasetInProject", destinationTable.Project, destinationTable.Dataset).Return(bQDatasetHandle).Once() + bQDatasetHandle.On("Metadata", testingContext).Return(&datasetMetadata, nil).Once() + bQClient.On("DatasetInProject", bQResource.Project, bQResource.Dataset).Return(bQDatasetHandle).Once() + bQDatasetHandle.On("Metadata", testingContext).Return(&datasetMetadata, nil).Once() + bQDatasetHandle.On("Table", bQResource.Table).Return(bQTable) + bQDatasetHandle.On("Table", destinationTable.Table).Return(bQTable) + bQTable.On("CopierFrom", []bqiface.Table{bQTable}).Return(bQCopier) + bQCopier.On("Run", testingContext).Return(bQJob, nil) + bQJob.On("Wait", testingContext).Return(&bigquery.JobStatus{}, errors.New(errorMsg)) + + resp, err := backupTable(testingContext, request, bQClient) + + assert.Equal(t, errorMsg, err.Error()) + assert.Equal(t, models.BackupResourceResponse{}, resp) + }) + t.Run("should fail when unable to get metadata of the backup table", func(t *testing.T) { + bQClient := new(BqClientMock) + defer bQClient.AssertExpectations(t) + + bQDatasetHandle := new(BqDatasetMock) + defer bQDatasetHandle.AssertExpectations(t) + + bQTable := 
new(BqTableMock) + defer bQTable.AssertExpectations(t) + + bQCopier := new(BqCopierMock) + defer bQCopier.AssertExpectations(t) + + bQJob := new(BqJobMock) + defer bQJob.AssertExpectations(t) + + errorMsg := "unable to get metadata of backup table" + + //duplicate table + bQClient.On("DatasetInProject", destinationTable.Project, destinationTable.Dataset).Return(bQDatasetHandle).Once() + bQDatasetHandle.On("Metadata", testingContext).Return(&datasetMetadata, nil).Once() + bQClient.On("DatasetInProject", bQResource.Project, bQResource.Dataset).Return(bQDatasetHandle).Once() + bQDatasetHandle.On("Metadata", testingContext).Return(&datasetMetadata, nil).Once() + bQDatasetHandle.On("Table", bQResource.Table).Return(bQTable) + bQDatasetHandle.On("Table", destinationTable.Table).Return(bQTable) + bQTable.On("CopierFrom", []bqiface.Table{bQTable}).Return(bQCopier) + bQCopier.On("Run", testingContext).Return(bQJob, nil) + bQJob.On("Wait", testingContext).Return(&bigquery.JobStatus{}, nil) + + //update expiry + bQTable.On("Metadata", testingContext).Return(tableMetadata, errors.New(errorMsg)).Once() + + resp, err := backupTable(testingContext, request, bQClient) + + assert.Equal(t, errorMsg, err.Error()) + assert.Equal(t, models.BackupResourceResponse{}, resp) + }) + t.Run("should fail when unable to update expiration of the backup table", func(t *testing.T) { + bQClient := new(BqClientMock) + defer bQClient.AssertExpectations(t) + + bQDatasetHandle := new(BqDatasetMock) + defer bQDatasetHandle.AssertExpectations(t) + + bQTable := new(BqTableMock) + defer bQTable.AssertExpectations(t) + + bQCopier := new(BqCopierMock) + defer bQCopier.AssertExpectations(t) + + bQJob := new(BqJobMock) + defer bQJob.AssertExpectations(t) + + errorMsg := "unable to update expiration of backup table" + + //duplicate table + bQClient.On("DatasetInProject", destinationTable.Project, destinationTable.Dataset).Return(bQDatasetHandle).Once() + bQDatasetHandle.On("Metadata", 
testingContext).Return(&datasetMetadata, nil).Once() + bQClient.On("DatasetInProject", bQResource.Project, bQResource.Dataset).Return(bQDatasetHandle).Once() + bQDatasetHandle.On("Metadata", testingContext).Return(&datasetMetadata, nil).Once() + bQDatasetHandle.On("Table", bQResource.Table).Return(bQTable) + bQDatasetHandle.On("Table", destinationTable.Table).Return(bQTable) + bQTable.On("CopierFrom", []bqiface.Table{bQTable}).Return(bQCopier) + bQCopier.On("Run", testingContext).Return(bQJob, nil) + bQJob.On("Wait", testingContext).Return(&bigquery.JobStatus{}, nil) + + //update expiry + bQTable.On("Metadata", testingContext).Return(tableMetadata, nil).Once() + bQTable.On("Update", testingContext, toUpdate, eTag).Return(tableMetadata, errors.New(errorMsg)) + + resp, err := backupTable(testingContext, request, bQClient) + + assert.Equal(t, errorMsg, err.Error()) + assert.Equal(t, models.BackupResourceResponse{}, resp) + }) + t.Run("should fail when unable to ensure the backup table", func(t *testing.T) { + bQClient := new(BqClientMock) + defer bQClient.AssertExpectations(t) + + bQDatasetHandle := new(BqDatasetMock) + defer bQDatasetHandle.AssertExpectations(t) + + bQTable := new(BqTableMock) + defer bQTable.AssertExpectations(t) + + bQCopier := new(BqCopierMock) + defer bQCopier.AssertExpectations(t) + + bQJob := new(BqJobMock) + defer bQJob.AssertExpectations(t) + + errorMsg := "unable to ensure the backup table" + + //duplicate table + bQClient.On("DatasetInProject", destinationTable.Project, destinationTable.Dataset).Return(bQDatasetHandle).Once() + bQDatasetHandle.On("Metadata", testingContext).Return(&datasetMetadata, nil).Once() + bQClient.On("DatasetInProject", bQResource.Project, bQResource.Dataset).Return(bQDatasetHandle).Once() + bQDatasetHandle.On("Metadata", testingContext).Return(&datasetMetadata, nil).Once() + bQDatasetHandle.On("Table", bQResource.Table).Return(bQTable) + bQDatasetHandle.On("Table", destinationTable.Table).Return(bQTable) + 
bQTable.On("CopierFrom", []bqiface.Table{bQTable}).Return(bQCopier) + bQCopier.On("Run", testingContext).Return(bQJob, nil) + bQJob.On("Wait", testingContext).Return(&bigquery.JobStatus{}, nil) + + //update expiry + bQTable.On("Metadata", testingContext).Return(tableMetadata, nil).Once() + bQTable.On("Update", testingContext, toUpdate, eTag).Return(tableMetadata, nil) + + //verify + bQTable.On("Metadata", testingContext).Return(tableMetadata, errors.New(errorMsg)).Once() + + resp, err := backupTable(testingContext, request, bQClient) + + assert.Equal(t, errorMsg, err.Error()) + assert.Equal(t, models.BackupResourceResponse{}, resp) + }) + }) } diff --git a/go.mod b/go.mod index 8c7617e3df..31b4f6ae9d 100644 --- a/go.mod +++ b/go.mod @@ -10,12 +10,13 @@ require ( github.com/flosch/pongo2 v0.0.0-20200913210552-0d938eb266f3 github.com/gogo/protobuf v1.3.2 github.com/golang-migrate/migrate/v4 v4.14.1 + github.com/golang/glog v1.0.0 // indirect github.com/golang/protobuf v1.5.2 github.com/google/uuid v1.3.0 github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8 github.com/gorilla/mux v1.8.0 github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 - github.com/grpc-ecosystem/grpc-gateway/v2 v2.6.0 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.5.0 github.com/gtank/cryptopasta v0.0.0-20170601214702-1f550f6f2f69 github.com/hashicorp/go-hclog v0.14.1 github.com/hashicorp/go-multierror v1.1.0 diff --git a/go.sum b/go.sum index 3f9363cfb1..60d38269b8 100644 --- a/go.sum +++ b/go.sum @@ -297,6 +297,7 @@ github.com/golang-migrate/migrate/v4 v4.14.1/go.mod h1:l7Ks0Au6fYHuUIxUhQ0rcVX1u github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog 
v0.0.0-20210429001901-424d2337a529/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -416,8 +417,8 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 h1:THDBEeQ9xZ8JEaCLyLQqXMMdR github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.6.0 h1:rgxjzoDmDXw5q8HONgyHhBas4to0/XWRo/gPpJhsUNQ= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.6.0/go.mod h1:qrJPVzv9YlhsrxJc3P/Q85nr0w1lIRikTl4JlhdDH5w= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.5.0 h1:ajue7SzQMywqRjg2fK7dcpc0QhFGpTR2plWfV4EZWR4= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.5.0/go.mod h1:r1hZAcvfFXuYmcKyCJI9wlyOPIZUJl6FCB8Cpca/NLE= github.com/gtank/cryptopasta v0.0.0-20170601214702-1f550f6f2f69 h1:7xsUJsB2NrdcttQPa7JLEaGzvdbk7KvfrjgHZXOQRo0= github.com/gtank/cryptopasta v0.0.0-20170601214702-1f550f6f2f69/go.mod h1:YLEMZOtU+AZ7dhN9T/IpGhXVGly2bvkJQ+zxj3WeVQo= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= @@ -979,6 +980,7 @@ golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 
v0.0.0-20210615190721-d04028783cf1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw= @@ -1270,6 +1272,7 @@ google.golang.org/genproto v0.0.0-20210517163617-5e0236093d7a/go.mod h1:P3QM42oQ google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210617175327-b9e0b3197ced/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= diff --git a/mock/backup.go b/mock/backup.go new file mode 100644 index 0000000000..1e3a2adee2 --- /dev/null +++ b/mock/backup.go @@ -0,0 +1,28 @@ +package mock + +import ( + "github.com/odpf/optimus/models" + "github.com/odpf/optimus/store" + "github.com/stretchr/testify/mock" +) + +type BackupRepo struct { + mock.Mock +} + +func (repo *BackupRepo) Save(spec models.BackupSpec) error { + return repo.Called(spec).Error(0) +} + +func (repo *BackupRepo) GetAll() ([]models.BackupSpec, error) { + args := repo.Called() + return args.Get(0).([]models.BackupSpec), args.Error(1) +} + +type BackupRepoFactory struct { + mock.Mock +} + +func (fac *BackupRepoFactory) 
New(projectSpec models.ProjectSpec, storer models.Datastorer) store.BackupRepository { + return fac.Called(projectSpec, storer).Get(0).(store.BackupRepository) +} diff --git a/mock/datastore.go b/mock/datastore.go index b8fbf32fad..268ff332d1 100644 --- a/mock/datastore.go +++ b/mock/datastore.go @@ -37,8 +37,12 @@ func (d *Datastorer) ReadResource(ctx context.Context, inp models.ReadResourceRe func (d *Datastorer) DeleteResource(ctx context.Context, inp models.DeleteResourceRequest) error { return d.Called(ctx, inp).Error(0) } -func (d *Datastorer) BackupResource(ctx context.Context, inp models.BackupResourceRequest) error { - return d.Called(ctx, inp).Error(0) +func (d *Datastorer) BackupResource(ctx context.Context, inp models.BackupResourceRequest) (models.BackupResourceResponse, error) { + args := d.Called(ctx, models.BackupResourceRequest{ + Resource: inp.Resource, + BackupSpec: inp.BackupSpec, + }) + return args.Get(0).(models.BackupResourceResponse), args.Error(1) } type DatastoreTypeController struct { @@ -112,11 +116,21 @@ func (d *DatastoreService) DeleteResource(ctx context.Context, namespace models. 
return d.Called(ctx, namespace, datastoreName, name).Error(1) } -func (d *DatastoreService) BackupResourceDryRun(ctx context.Context, projectSpec models.ProjectSpec, namespaceSpec models.NamespaceSpec, jobSpecs []models.JobSpec) ([]string, error) { - args := d.Called(ctx, projectSpec, namespaceSpec, jobSpecs) +func (d *DatastoreService) BackupResourceDryRun(ctx context.Context, req models.BackupRequest, jobSpecs []models.JobSpec) ([]string, error) { + args := d.Called(ctx, req, jobSpecs) + return args.Get(0).([]string), args.Error(1) +} + +func (d *DatastoreService) BackupResource(ctx context.Context, req models.BackupRequest, jobSpecs []models.JobSpec) ([]string, error) { + args := d.Called(ctx, req, jobSpecs) return args.Get(0).([]string), args.Error(1) } +func (d *DatastoreService) ListBackupResources(projectSpec models.ProjectSpec, datastoreName string) ([]models.BackupSpec, error) { + args := d.Called(projectSpec, datastoreName) + return args.Get(0).([]models.BackupSpec), args.Error(1) +} + type SupportedDatastoreRepo struct { mock.Mock } diff --git a/mock/job.go b/mock/job.go index b78fa9c087..a68202c981 100644 --- a/mock/job.go +++ b/mock/job.go @@ -192,7 +192,7 @@ func (j *JobService) GetByDestination(projectSpec models.ProjectSpec, destinatio } func (j *JobService) GetDownstream(ctx context.Context, projectSpec models.ProjectSpec, jobName string) ([]models.JobSpec, error) { - args := j.Called(projectSpec, jobName) + args := j.Called(ctx, projectSpec, jobName) return args.Get(0).([]models.JobSpec), args.Error(1) } diff --git a/models/backup.go b/models/backup.go index 0302665da2..e144d946eb 100644 --- a/models/backup.go +++ b/models/backup.go @@ -1,9 +1,49 @@ package models +import ( + "time" + + "github.com/google/uuid" +) + +type BackupResourceRequest struct { + Resource ResourceSpec + BackupSpec BackupRequest + BackupTime time.Time +} + +type BackupResourceResponse struct { + ResultURN string + ResultSpec interface{} +} + type BackupRequest struct { + ID 
uuid.UUID ResourceName string Project ProjectSpec + Namespace NamespaceSpec Datastore string Description string IgnoreDownstream bool + Config map[string]string + DryRun bool +} + +type BackupResult struct { + URN string + Spec interface{} +} + +type BackupResponse struct { + ResourceURN string + Result BackupResult +} + +type BackupSpec struct { + ID uuid.UUID + Resource ResourceSpec + Result map[string]interface{} + Description string + Config map[string]string + CreatedAt time.Time } diff --git a/models/datastore.go b/models/datastore.go index c1b62db6b8..80906b8763 100644 --- a/models/datastore.go +++ b/models/datastore.go @@ -65,7 +65,7 @@ type Datastorer interface { DeleteResource(context.Context, DeleteResourceRequest) error // BackupResource will backup the requested resource if exists - BackupResource(context.Context, BackupResourceRequest) error + BackupResource(context.Context, BackupResourceRequest) (BackupResourceResponse, error) } type DatastoreTypeController interface { @@ -121,12 +121,6 @@ type DeleteResourceRequest struct { Project ProjectSpec } -type BackupResourceRequest struct { - Resource ResourceSpec - Project ProjectSpec - DryRun bool -} - var ( DatastoreRegistry = &supportedDatastore{ data: map[string]Datastorer{}, @@ -190,5 +184,7 @@ type DatastoreService interface { UpdateResource(ctx context.Context, namespace NamespaceSpec, resourceSpecs []ResourceSpec, obs progress.Observer) error ReadResource(ctx context.Context, namespace NamespaceSpec, datastoreName, name string) (ResourceSpec, error) DeleteResource(ctx context.Context, namespace NamespaceSpec, datastoreName, name string) error - BackupResourceDryRun(ctx context.Context, projectSpec ProjectSpec, namespaceSpec NamespaceSpec, jobSpecs []JobSpec) ([]string, error) + BackupResourceDryRun(ctx context.Context, backupRequest BackupRequest, jobSpecs []JobSpec) ([]string, error) + BackupResource(ctx context.Context, backupRequest BackupRequest, jobSpecs []JobSpec) ([]string, error) + 
ListBackupResources(projectSpec ProjectSpec, datastoreName string) ([]BackupSpec, error) } diff --git a/store/postgres/backup_repository.go b/store/postgres/backup_repository.go new file mode 100644 index 0000000000..635a5ae5fe --- /dev/null +++ b/store/postgres/backup_repository.go @@ -0,0 +1,117 @@ +package postgres + +import ( + "encoding/json" + "time" + + "gorm.io/datatypes" + + "github.com/google/uuid" + "github.com/jinzhu/gorm" + "github.com/odpf/optimus/models" + "github.com/pkg/errors" +) + +type BackupDetail struct { + Result map[string]interface{} + Description string + Config map[string]string +} + +type Backup struct { + ID uuid.UUID `gorm:"primary_key;type:uuid"` + + ResourceID uuid.UUID + Resource Resource `gorm:"foreignKey:ResourceID"` + + Spec datatypes.JSON + + CreatedAt time.Time `gorm:"not null" json:"created_at"` + UpdatedAt time.Time `gorm:"not null" json:"updated_at"` +} + +type backupRepository struct { + db *gorm.DB + project models.ProjectSpec + datastorer models.Datastorer +} + +func (b Backup) FromSpec(backupSpec models.BackupSpec) (Backup, error) { + adaptResource, err := Resource{}.FromSpec(backupSpec.Resource) + if err != nil { + return Backup{}, err + } + + toDBSpec := BackupDetail{ + Result: backupSpec.Result, + Description: backupSpec.Description, + Config: backupSpec.Config, + } + specInBytes, err := json.Marshal(toDBSpec) + if err != nil { + return Backup{}, err + } + + return Backup{ + ID: backupSpec.ID, + ResourceID: adaptResource.ID, + Spec: specInBytes, + }, nil +} + +func (repo *backupRepository) Save(spec models.BackupSpec) error { + if spec.Resource.ID == uuid.Nil { + return errors.New("resource cannot be empty") + } + p, err := Backup{}.FromSpec(spec) + if err != nil { + return err + } + return repo.db.Create(&p).Error +} + +func (b Backup) ToSpec(ds models.Datastorer) (models.BackupSpec, error) { + backupSpec := BackupDetail{} + if err := json.Unmarshal(b.Spec, &backupSpec); err != nil { + return models.BackupSpec{}, err + 
} + + resourceSpec, err := b.Resource.ToSpec(ds) + if err != nil { + return models.BackupSpec{}, err + } + + return models.BackupSpec{ + ID: b.ID, + Resource: resourceSpec, + Result: backupSpec.Result, + Description: backupSpec.Description, + Config: backupSpec.Config, + CreatedAt: b.CreatedAt, + }, nil +} + +func (repo *backupRepository) GetAll() ([]models.BackupSpec, error) { + var specs []models.BackupSpec + var backups []Backup + if err := repo.db.Preload("Resource").Joins("JOIN resource ON backup.resource_id = resource.id"). + Where("resource.project_id = ?", repo.project.ID).Find(&backups).Error; err != nil { + return specs, err + } + for _, b := range backups { + adapted, err := b.ToSpec(repo.datastorer) + if err != nil { + return specs, errors.Wrap(err, "failed to adapt backup") + } + specs = append(specs, adapted) + } + return specs, nil +} + +func NewBackupRepository(db *gorm.DB, projectSpec models.ProjectSpec, ds models.Datastorer) *backupRepository { + return &backupRepository{ + db: db, + project: projectSpec, + datastorer: ds, + } +} diff --git a/store/postgres/backup_repository_test.go b/store/postgres/backup_repository_test.go new file mode 100644 index 0000000000..b4a0abf51c --- /dev/null +++ b/store/postgres/backup_repository_test.go @@ -0,0 +1,131 @@ +// +build !unit_test + +package postgres + +import ( + "fmt" + "os" + "testing" + + "github.com/odpf/optimus/mock" + testMock "github.com/stretchr/testify/mock" + + "github.com/google/uuid" + "github.com/jinzhu/gorm" + "github.com/odpf/optimus/models" + "github.com/stretchr/testify/assert" +) + +func TestBackupRepository(t *testing.T) { + projectSpec := models.ProjectSpec{ + ID: uuid.Must(uuid.NewRandom()), + Name: "t-optimus-project", + Config: map[string]string{ + "bucket": "gs://some_folder", + }, + } + hash, _ := models.NewApplicationSecret("32charshtesthashtesthashtesthash") + + // prepare mocked datastore + dsTypeTableAdapter := new(mock.DatastoreTypeAdapter) + dsTypeTableController := 
new(mock.DatastoreTypeController) + dsTypeTableController.On("Adapter").Return(dsTypeTableAdapter) + dsController := map[models.ResourceType]models.DatastoreTypeController{ + models.ResourceTypeTable: dsTypeTableController, + } + datastorer := new(mock.Datastorer) + datastorer.On("Types").Return(dsController) + datastorer.On("Name").Return("DS") + + DBSetup := func() *gorm.DB { + dbURL, ok := os.LookupEnv("TEST_OPTIMUS_DB_URL") + if !ok { + panic("unable to find TEST_OPTIMUS_DB_URL env var") + } + dbConn, err := Connect(dbURL, 1, 1) + if err != nil { + panic(err) + } + m, err := NewHTTPFSMigrator(dbURL) + if err != nil { + panic(err) + } + if err := m.Drop(); err != nil { + panic(err) + } + if err := Migrate(dbURL); err != nil { + panic(err) + } + + projRepo := NewProjectRepository(dbConn, hash) + assert.Nil(t, projRepo.Save(projectSpec)) + return dbConn + } + + namespaceSpec := models.NamespaceSpec{ + ID: uuid.Must(uuid.NewRandom()), + Name: "dev-team-1", + ProjectSpec: projectSpec, + } + + t.Run("Save", func(t *testing.T) { + db := DBSetup() + defer db.Close() + + resourceSpec := models.ResourceSpec{ + ID: uuid.Must(uuid.NewRandom()), + Version: 1, + Name: "proj.datas.test", + Datastore: datastorer, + Type: models.ResourceTypeTable, + Spec: nil, + URN: "datastore://proj:datas.test", + } + dsTypeTableAdapter.On("ToYaml", resourceSpec).Return([]byte("some binary data"), nil) + dsTypeTableAdapter.On("FromYaml", []byte("some binary data")).Return(resourceSpec, nil) + + dsTypeTableController.On("GenerateURN", testMock.Anything).Return(resourceSpec.URN, nil).Twice() + + backupUuid := uuid.Must(uuid.NewRandom()) + projectName := "project" + destinationDataset := "optimus_backup" + destinationTable := fmt.Sprintf("backup_playground_table_%s", backupUuid) + //urn := fmt.Sprintf("store://%s:%s.%s", projectName, destinationDataset, destinationTable) + + backupResult := make(map[string]interface{}) + backupResult["project"] = projectName + backupResult["dataset"] = 
destinationDataset + backupResult["table"] = destinationTable + + backupSpec := models.BackupSpec{ + ID: backupUuid, + Resource: resourceSpec, + Result: backupResult, + Description: "description", + Config: map[string]string{ + "ttl": "30", + "dataset": destinationDataset, + "prefix": "backup", + }, + } + + projectResourceSpecRepo := NewProjectResourceSpecRepository(db, projectSpec, datastorer) + resourceRepo := NewResourceSpecRepository(db, namespaceSpec, datastorer, projectResourceSpecRepo) + + err := resourceRepo.Insert(resourceSpec) + assert.Nil(t, err) + + backupRepo := NewBackupRepository(db, projectSpec, datastorer) + err = backupRepo.Save(backupSpec) + assert.Nil(t, err) + + backups, err := backupRepo.GetAll() + assert.Nil(t, err) + + assert.Equal(t, backupSpec.ID, backups[0].ID) + assert.Equal(t, backupSpec.Description, backups[0].Description) + assert.Equal(t, backupSpec.Resource, backups[0].Resource) + assert.Equal(t, backupSpec.Config, backups[0].Config) + assert.Equal(t, backupSpec.Result, backups[0].Result) + }) +} diff --git a/store/postgres/migrations/000014_create_backup_table.down.sql b/store/postgres/migrations/000014_create_backup_table.down.sql new file mode 100644 index 0000000000..feba141415 --- /dev/null +++ b/store/postgres/migrations/000014_create_backup_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS backup; diff --git a/store/postgres/migrations/000014_create_backup_table.up.sql b/store/postgres/migrations/000014_create_backup_table.up.sql new file mode 100644 index 0000000000..b7af9da339 --- /dev/null +++ b/store/postgres/migrations/000014_create_backup_table.up.sql @@ -0,0 +1,8 @@ +CREATE EXTENSION IF NOT EXISTS "uuid-ossp"; +CREATE TABLE IF NOT EXISTS backup ( + id UUID PRIMARY KEY NOT NULL, + resource_id UUID NOT NULL REFERENCES resource (id) ON DELETE CASCADE, + spec JSONB, + created_at TIMESTAMP WITH TIME ZONE NOT NULL, + updated_at TIMESTAMP WITH TIME ZONE NOT NULL +); diff --git a/store/store.go b/store/store.go index 
72c82496d4..603939053c 100644 --- a/store/store.go +++ b/store/store.go @@ -97,3 +97,9 @@ type ReplaySpecRepository interface { GetByProjectIDAndStatus(projectID uuid.UUID, status []string) ([]models.ReplaySpec, error) GetByProjectID(projectID uuid.UUID) ([]models.ReplaySpec, error) } + +// BackupRepository represents a storage interface for backup objects +type BackupRepository interface { + Save(spec models.BackupSpec) error + GetAll() ([]models.BackupSpec, error) +}