diff --git a/Makefile b/Makefile index 9e4bbb4..d819164 100644 --- a/Makefile +++ b/Makefile @@ -52,7 +52,7 @@ OPERATOR_SDK_VERSION ?= v1.38.0 # Image URL to use all building/pushing image targets DOCKER_HUB_NAME ?= $(shell docker info | sed '/Username:/!d;s/.* //') IMG_NAME ?= typesense-operator -IMG_TAG ?= 0.2.1 +IMG_TAG ?= 0.2.2 IMG ?= $(DOCKER_HUB_NAME)/$(IMG_NAME):$(IMG_TAG) # ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. diff --git a/README.md b/README.md index 0dafc9f..7862906 100644 --- a/README.md +++ b/README.md @@ -15,6 +15,8 @@ Key features of Typesense Kubernetes Operator include: - provision Typesense services (headless & discovery `Services`), - actively discover and update Typesense's nodes list (quorum configuration mounted as `ConfigMap`), - place claims for Typesense `PersistentVolumes` + - _optionally_ expose Typesense API endpoint via an `Ingress` + - _optionally_ provision one or multiple instances (one per target URL) of DocSearch as `Cronjobs` - **Raft Quorum Configuration & Recovery Automation**: - Continuous active (re)discovery of the quorum configuration reacting to changes in `ReplicaSet` **without the need of an additional sidecar container**, - Automatic recovery of a cluster that has lost quorum **without the need of manual intervention**. @@ -78,15 +80,20 @@ The Typesense Kubernetes Operator manages the entire lifecycle of Typesense Clus 4. A `StatefulSet` is then created. The quorum configuration stored in the `ConfigMap` is mounted as a volume in each `Pod` under `/usr/share/typesense/nodelist`. No `Pod` restart is necessary when the `ConfigMap` changes, as raft automatically detects and applies the updates. +5. Optionally, an **nginx:alpine** workload is provisioned as `Deployment` and published via an `Ingress`, in order to safely expose + the Typesense REST/API endpoint outside the Kubernetes cluster **only** to selected referrers. 
The configuration of the + nginx workload is stored in a `ConfigMap`. +6. Optionally, one or more instances of **DocSearch** are deployed as distinct `CronJobs` (one per scraping target URL), + which based on user-defined schedules, periodically scrape the target sites and store the results in Typesense. -![image](https://github.com/user-attachments/assets/30b6989c-c872-46ef-8ece-86c5d4911667) +![image](https://github.com/user-attachments/assets/2afb802c-11f7-4be4-b44f-5dab9d489971) > [!NOTE] > The interval between reconciliation loops depends on the number of nodes. This approach ensures raft has sufficient > "breathing room" to carry out its operations—such as leader election, log replication, and bootstrapping—before the > next quorum health reconciliation begins. -5. The controller assesses the quorum's health by probing each node at `http://{nodeUrl}:{api-port}/health`. Based on the +7. The controller assesses the quorum's health by probing each node at `http://{nodeUrl}:{api-port}/health`. Based on the results, it formulates an action plan for the next reconciliation loop. This process is detailed in the following section: ### Problem 2: Recovering a cluster that has lost quorum @@ -146,7 +153,7 @@ of manual intervention in order to recover a cluster that has lost quorum. 
Typesense Kubernetes Operator is controlling the lifecycle of multiple Typesense instances in the same Kubernetes cluster by introducing `TypesenseCluster`, a new Custom Resource Definition: -![image](https://github.com/user-attachments/assets/23e40781-ca21-4297-93bf-2b5dbebc7e0e) +![image](https://github.com/user-attachments/assets/3dd20498-fb4b-46b7-9f60-ff413fadc942) **Spec** @@ -160,6 +167,7 @@ introducing `TypesenseCluster`, a new Custom Resource Definition: | corsDomains | domains that would be allowed for CORS calls | X | | | storage | check StorageSpec below | | | | ingress | check IngressSpec below | X | | +| scrapers | array of DocSearchScraperSpec; check below | X | | **StorageSpec** (optional) @@ -178,12 +186,27 @@ introducing `TypesenseCluster`, a new Custom Resource Definition: | ingressClassName | Ingress to use | | | | annotations | User-Defined annotations | X | | +> [!IMPORTANT] +> This feature requires the existence of [cert-manager](https://cert-manager.io/) in the cluster, but **does not** actively enforce it with an error. +> If you are targeting Open Telekom Cloud, you might be interested in provisioning additionally the designated DNS solver webhook +> for Open Telekom Cloud. You can find it [here](https://github.com/akyriako/cert-manager-webhook-opentelekomcloud). + +**DocSearchScraperSpec** (optional) + +| Name | Description | Optional | Default | +|-------------|------------------------------------------|----------|---------| +| name | name of the scraper | | | +| image | container image to use | | | +| config | config to use | | | +| schedule | cron expression; no timezone; no seconds | | | + > [!CAUTION] > Although in Typesense documentation under _Production Best Practices_ -> _Configuration_ is stated: > "_Typesense comes built-in with a high performance HTTP server (opens new window)that is used by likes of Fastly (opens new window)in > their edge servers at scale. 
So Typesense can be directly exposed to incoming public-facing internet traffic, -> without the need to place it behind another web server like Nginx / Apache or your backend API._" it is highly recommended -> , from this operator's perspective, to always expose Typesense behind a reverse proxy (using the `referer` option). +> without the need to place it behind another web server like Nginx / Apache or your backend API._" +> +> It is highly recommended, from this operator's perspective, to always expose Typesense behind a reverse proxy (using the `referer` option). **Status** diff --git a/api/v1alpha1/typesensecluster_types.go b/api/v1alpha1/typesensecluster_types.go index 4c3ec57..8ad14ba 100644 --- a/api/v1alpha1/typesensecluster_types.go +++ b/api/v1alpha1/typesensecluster_types.go @@ -57,6 +57,8 @@ type TypesenseClusterSpec struct { Storage *StorageSpec `json:"storage"` Ingress *IngressSpec `json:"ingress,omitempty"` + + Scrapers []DocSearchScraperSpec `json:"scrapers,omitempty"` } type StorageSpec struct { @@ -84,6 +86,16 @@ type IngressSpec struct { Annotations map[string]string `json:"annotations,omitempty"` } +type DocSearchScraperSpec struct { + Name string `json:"name"` + Image string `json:"image"` + Config string `json:"config"` + + // +kubebuilder:validation:Pattern:=`(^((\*\/)?([0-5]?[0-9])((\,|\-|\/)([0-5]?[0-9]))*|\*)\s+((\*\/)?((2[0-3]|1[0-9]|[0-9]|00))((\,|\-|\/)(2[0-3]|1[0-9]|[0-9]|00))*|\*)\s+((\*\/)?([1-9]|[12][0-9]|3[01])((\,|\-|\/)([1-9]|[12][0-9]|3[01]))*|\*)\s+((\*\/)?([1-9]|1[0-2])((\,|\-|\/)([1-9]|1[0-2]))*|\*|(jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec))\s+((\*\/)?[0-6]((\,|\-|\/)[0-6])*|\*|00|(sun|mon|tue|wed|thu|fri|sat))\s*$)|@(annually|yearly|monthly|weekly|daily|hourly|reboot)` + // +kubebuilder:validation:Type=string + Schedule string `json:"schedule"` +} + // TypesenseClusterStatus defines the observed state of TypesenseCluster type TypesenseClusterStatus struct { diff --git a/api/v1alpha1/zz_generated.deepcopy.go 
b/api/v1alpha1/zz_generated.deepcopy.go index 930cc31..2f6da68 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -25,6 +25,21 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DocSearchScraperSpec) DeepCopyInto(out *DocSearchScraperSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DocSearchScraperSpec. +func (in *DocSearchScraperSpec) DeepCopy() *DocSearchScraperSpec { + if in == nil { + return nil + } + out := new(DocSearchScraperSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IngressSpec) DeepCopyInto(out *IngressSpec) { *out = *in @@ -145,6 +160,11 @@ func (in *TypesenseClusterSpec) DeepCopyInto(out *TypesenseClusterSpec) { *out = new(IngressSpec) (*in).DeepCopyInto(*out) } + if in.Scrapers != nil { + in, out := &in.Scrapers, &out.Scrapers + *out = make([]DocSearchScraperSpec, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypesenseClusterSpec. diff --git a/charts/typesense-operator/Chart.yaml b/charts/typesense-operator/Chart.yaml index 6eaaffd..17bec68 100644 --- a/charts/typesense-operator/Chart.yaml +++ b/charts/typesense-operator/Chart.yaml @@ -13,9 +13,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.2.1 +version: 0.2.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "0.2.1" +appVersion: "0.2.2" diff --git a/charts/typesense-operator/templates/manager-rbac.yaml b/charts/typesense-operator/templates/manager-rbac.yaml index a6c9c1a..1bacbe9 100644 --- a/charts/typesense-operator/templates/manager-rbac.yaml +++ b/charts/typesense-operator/templates/manager-rbac.yaml @@ -73,6 +73,18 @@ rules: - patch - update - watch +- apiGroups: + - batch + resources: + - cronjobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - "" resources: diff --git a/charts/typesense-operator/templates/typesensecluster-crd.yaml b/charts/typesense-operator/templates/typesensecluster-crd.yaml index 6604c92..3ae01a9 100644 --- a/charts/typesense-operator/templates/typesensecluster-crd.yaml +++ b/charts/typesense-operator/templates/typesensecluster-crd.yaml @@ -95,6 +95,25 @@ spec: resetPeersOnError: default: true type: boolean + scrapers: + items: + properties: + config: + type: string + image: + type: string + name: + type: string + schedule: + pattern: (^((\*\/)?([0-5]?[0-9])((\,|\-|\/)([0-5]?[0-9]))*|\*)\s+((\*\/)?((2[0-3]|1[0-9]|[0-9]|00))((\,|\-|\/)(2[0-3]|1[0-9]|[0-9]|00))*|\*)\s+((\*\/)?([1-9]|[12][0-9]|3[01])((\,|\-|\/)([1-9]|[12][0-9]|3[01]))*|\*)\s+((\*\/)?([1-9]|1[0-2])((\,|\-|\/)([1-9]|1[0-2]))*|\*|(jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec))\s+((\*\/)?[0-6]((\,|\-|\/)[0-6])*|\*|00|(sun|mon|tue|wed|thu|fri|sat))\s*$)|@(annually|yearly|monthly|weekly|daily|hourly|reboot) + type: string + required: + - config + - image + - name + - schedule + type: object + type: array storage: properties: size: diff --git a/charts/typesense-operator/values.yaml b/charts/typesense-operator/values.yaml index 0a22f0d..2da7be4 100644 --- a/charts/typesense-operator/values.yaml +++ b/charts/typesense-operator/values.yaml @@ -12,7 +12,7 @@ 
controllerManager: - ALL image: repository: akyriako78/typesense-operator - tag: 0.2.1 + tag: 0.2.2 resources: limits: cpu: 500m diff --git a/config/crd/bases/ts.opentelekomcloud.com_typesenseclusters.yaml b/config/crd/bases/ts.opentelekomcloud.com_typesenseclusters.yaml index 313ecfe..9f5e7a0 100644 --- a/config/crd/bases/ts.opentelekomcloud.com_typesenseclusters.yaml +++ b/config/crd/bases/ts.opentelekomcloud.com_typesenseclusters.yaml @@ -94,6 +94,25 @@ spec: resetPeersOnError: default: true type: boolean + scrapers: + items: + properties: + config: + type: string + image: + type: string + name: + type: string + schedule: + pattern: (^((\*\/)?([0-5]?[0-9])((\,|\-|\/)([0-5]?[0-9]))*|\*)\s+((\*\/)?((2[0-3]|1[0-9]|[0-9]|00))((\,|\-|\/)(2[0-3]|1[0-9]|[0-9]|00))*|\*)\s+((\*\/)?([1-9]|[12][0-9]|3[01])((\,|\-|\/)([1-9]|[12][0-9]|3[01]))*|\*)\s+((\*\/)?([1-9]|1[0-2])((\,|\-|\/)([1-9]|1[0-2]))*|\*|(jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec))\s+((\*\/)?[0-6]((\,|\-|\/)[0-6])*|\*|00|(sun|mon|tue|wed|thu|fri|sat))\s*$)|@(annually|yearly|monthly|weekly|daily|hourly|reboot) + type: string + required: + - config + - image + - name + - schedule + type: object + type: array storage: properties: size: diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index f41e811..3639319 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -72,6 +72,18 @@ rules: - patch - update - watch +- apiGroups: + - batch + resources: + - cronjobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - "" resources: diff --git a/config/samples/ts_v1alpha1_typesensecluster_kind.yaml b/config/samples/ts_v1alpha1_typesensecluster_kind.yaml index b28fdd4..a0d348f 100644 --- a/config/samples/ts_v1alpha1_typesensecluster_kind.yaml +++ b/config/samples/ts_v1alpha1_typesensecluster_kind.yaml @@ -41,4 +41,13 @@ spec: ingress: host: host.example.com ingressClassName: nginx - clusterIssuer: lets-encrypt-prod \ No newline at end of file + clusterIssuer: 
lets-encrypt-prod + scrapers: + - name: empty-target + image: typesense/docsearch-scraper:0.11.0 + config: '' + schedule: '*/2 * * * *' + - name: docusaurus-example-com + image: typesense/docsearch-scraper:0.11.0 + config: "{\"index_name\":\"docuraurus-example\",\"start_urls\":[\"https://docusaurus.example.com/\"],\"sitemap_urls\":[\"https://docusaurus.example.com/sitemap.xml\"],\"sitemap_alternate_links\":true,\"stop_urls\":[\"/tests\"],\"selectors\":{\"lvl0\":{\"selector\":\"(//ul[contains(@class,'menu__list')]//a[contains(@class, 'menu__link menu__link--sublist menu__link--active')]/text() | //nav[contains(@class, 'navbar')]//a[contains(@class, 'navbar__link--active')]/text())[last()]\",\"type\":\"xpath\",\"global\":true,\"default_value\":\"Documentation\"},\"lvl1\":\"header h1\",\"lvl2\":\"article h2\",\"lvl3\":\"article h3\",\"lvl4\":\"article h4\",\"lvl5\":\"article h5, article td:first-child\",\"lvl6\":\"article h6\",\"text\":\"article p, article li, article td:last-child\"},\"strip_chars\":\" .,;:#\",\"custom_settings\":{\"separatorsToIndex\":\"_\",\"attributesForFaceting\":[\"language\",\"version\",\"type\",\"docusaurus_tag\"],\"attributesToRetrieve\":[\"hierarchy\",\"content\",\"anchor\",\"url\",\"url_without_anchor\",\"type\"]},\"conversation_id\":[\"833762294\"],\"nb_hits\":46250}" + schedule: '*/2 * * * *' \ No newline at end of file diff --git a/internal/controller/typesensecluster_condition_types.go b/internal/controller/typesensecluster_condition_types.go index e88456b..82e9012 100644 --- a/internal/controller/typesensecluster_condition_types.go +++ b/internal/controller/typesensecluster_condition_types.go @@ -18,6 +18,7 @@ const ( ConditionReasonConfigMapNotReady = "ConfigMapNotReady" ConditionReasonServicesNotReady = "ServicesNotReady" ConditionReasonIngressNotReady = "IngressNotReady" + ConditionReasonScrapersNotReady = "ScrapersNotReady" ConditionReasonQuorumReady ConditionQuorum = "QuorumReady" ConditionReasonQuorumNotReady ConditionQuorum = 
"QuorumNotReady" ConditionReasonQuorumDowngraded ConditionQuorum = "QuorumDowngraded" diff --git a/internal/controller/typesensecluster_controller.go b/internal/controller/typesensecluster_controller.go index 328a50f..3f6a3aa 100644 --- a/internal/controller/typesensecluster_controller.go +++ b/internal/controller/typesensecluster_controller.go @@ -77,6 +77,7 @@ var ( // +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch // +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=get;list;watch;create;update;patch;delete // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. @@ -137,6 +138,15 @@ func (r *TypesenseClusterReconciler) Reconcile(ctx context.Context, req ctrl.Req return ctrl.Result{}, err } + err = r.ReconcileScraper(ctx, ts) + if err != nil { + cerr := r.setConditionNotReady(ctx, &ts, ConditionReasonScrapersNotReady, err) + if cerr != nil { + err = errors.Wrap(err, cerr.Error()) + } + return ctrl.Result{}, err + } + sts, err := r.ReconcileStatefulSet(ctx, ts) if err != nil { cerr := r.setConditionNotReady(ctx, &ts, ConditionReasonStatefulSetNotReady, err) diff --git a/internal/controller/typesensecluster_scraper.go b/internal/controller/typesensecluster_scraper.go new file mode 100644 index 0000000..e2f35c4 --- /dev/null +++ b/internal/controller/typesensecluster_scraper.go @@ -0,0 +1,204 @@ +package controller + +import ( + "context" + "fmt" + tsv1alpha1 "github.com/akyriako/typesense-operator/api/v1alpha1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + ctrl 
"sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "strconv" +) + +func (r *TypesenseClusterReconciler) ReconcileScraper(ctx context.Context, ts tsv1alpha1.TypesenseCluster) (err error) { + r.logger.V(debugLevel).Info("reconciling scrapers") + + labelSelector := getLabels(&ts) + listOptions := []client.ListOption{ + client.InNamespace(ts.Namespace), + client.MatchingLabels(labelSelector), + } + + var scraperCronJobs batchv1.CronJobList + if err := r.List(ctx, &scraperCronJobs, listOptions...); err != nil { + return err + } + + inSpecs := func(cronJobName string, scrapers []tsv1alpha1.DocSearchScraperSpec) bool { + for _, scraper := range scrapers { + if cronJobName == fmt.Sprintf("%s-scraper", scraper.Name) { + return true + } + } + + return false + } + + for _, scraperCronJob := range scraperCronJobs.Items { + if ts.Spec.Scrapers == nil || !inSpecs(scraperCronJob.Name, ts.Spec.Scrapers) { + err = r.deleteScraper(ctx, &scraperCronJob) + if err != nil { + return err + } + } + } + + if ts.Spec.Scrapers == nil { + return nil + } + + for _, scraper := range ts.Spec.Scrapers { + scraperName := fmt.Sprintf("%s-scraper", scraper.Name) + scraperExists := true + scraperObjectKey := client.ObjectKey{Namespace: ts.Namespace, Name: scraperName} + + var scraperCronJob = &batchv1.CronJob{} + if err := r.Get(ctx, scraperObjectKey, scraperCronJob); err != nil { + if apierrors.IsNotFound(err) { + scraperExists = false + } else { + r.logger.Error(err, fmt.Sprintf("unable to fetch scraper cronjob: %s", scraperObjectKey)) + } + } + + if !scraperExists { + r.logger.V(debugLevel).Info("creating scraper cronjob", "cronjob", scraperObjectKey.Name) + + err = r.createScraper(ctx, scraperObjectKey, &ts, &scraper) + if err != nil { + r.logger.Error(err, "creating scraper cronjob failed", "cronjob", scraperObjectKey.Name) + return err + } + } else { + r.logger.V(debugLevel).Info("updating scraper cronjob", "cronjob", scraperObjectKey.Name) + + hasChanged := false 
+ hasChangedConfig := false + container := scraperCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0] + + for _, env := range container.Env { + if env.Name == "CONFIG" && env.Value != scraper.Config { + hasChangedConfig = true + break + } + } + + if scraperCronJob.Spec.Schedule != scraper.Schedule || container.Image != scraper.Image || hasChangedConfig { + hasChanged = true + } + + if hasChanged { + err = r.deleteScraper(ctx, scraperCronJob) + if err != nil { + r.logger.Error(err, "deleting scraper cronjob failed", "cronjob", scraperObjectKey.Name) + return err + } + + err = r.createScraper(ctx, scraperObjectKey, &ts, &scraper) + if err != nil { + r.logger.Error(err, "creating scraper cronjob failed", "cronjob", scraperObjectKey.Name) + return err + } + } + } + } + + return nil +} + +func (r *TypesenseClusterReconciler) createScraper(ctx context.Context, key client.ObjectKey, ts *tsv1alpha1.TypesenseCluster, scraperSpec *tsv1alpha1.DocSearchScraperSpec) error { + scraper := &batchv1.CronJob{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "batch/v1", + Kind: "CronJob", + }, + ObjectMeta: getObjectMeta(ts, &key.Name, nil), + Spec: batchv1.CronJobSpec{ + ConcurrencyPolicy: batchv1.ForbidConcurrent, + SuccessfulJobsHistoryLimit: ptr.To[int32](1), + FailedJobsHistoryLimit: ptr.To[int32](1), + Schedule: scraperSpec.Schedule, + JobTemplate: batchv1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + BackoffLimit: ptr.To[int32](0), + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{ + { + Name: fmt.Sprintf("%s-docsearch-scraper", scraperSpec.Name), + Image: scraperSpec.Image, + Env: []corev1.EnvVar{ + { + Name: "CONFIG", + Value: scraperSpec.Config, + }, + { + Name: "TYPESENSE_API_KEY", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + Key: "typesense-api-key", + LocalObjectReference: corev1.LocalObjectReference{ + Name: fmt.Sprintf("%s-admin-key", ts.Name), + }, + 
}, + }, + }, + { + Name: "TYPESENSE_HOST", + Value: fmt.Sprintf("%s-svc", ts.Name), + }, + { + Name: "TYPESENSE_PORT", + Value: strconv.Itoa(ts.Spec.ApiPort), + }, + { + Name: "TYPESENSE_PROTOCOL", + Value: "http", + }, + }, + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1024m"), + corev1.ResourceMemory: resource.MustParse("512Mi"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("128m"), + corev1.ResourceMemory: resource.MustParse("112Mi"), + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + err := ctrl.SetControllerReference(ts, scraper, r.Scheme) + if err != nil { + return err + } + + err = r.Create(ctx, scraper) + if err != nil { + return err + } + + return nil +} + +func (r *TypesenseClusterReconciler) deleteScraper(ctx context.Context, scraper *batchv1.CronJob) error { + err := r.Delete(ctx, scraper) + if err != nil { + return err + } + + return nil +}