diff --git a/apis/clusters/v1beta1/cassandra_types.go b/apis/clusters/v1beta1/cassandra_types.go
index cd19c9374..784a5f75a 100644
--- a/apis/clusters/v1beta1/cassandra_types.go
+++ b/apis/clusters/v1beta1/cassandra_types.go
@@ -29,10 +29,6 @@ import (
"github.com/instaclustr/operator/pkg/models"
)
-type Spark struct {
- Version string `json:"version"`
-}
-
type CassandraRestoreFrom struct {
// Original cluster ID. Backup from that cluster will be used for restore
ClusterID string `json:"clusterID"`
@@ -60,7 +56,6 @@ type CassandraSpec struct {
DataCentres []*CassandraDataCentre `json:"dataCentres,omitempty"`
LuceneEnabled bool `json:"luceneEnabled,omitempty"`
PasswordAndUserAuth bool `json:"passwordAndUserAuth,omitempty"`
- Spark []*Spark `json:"spark,omitempty"`
BundledUseOnly bool `json:"bundledUseOnly,omitempty"`
UserRefs References `json:"userRefs,omitempty"`
//+kubebuilder:validate:MaxItems:=1
@@ -79,6 +74,47 @@ type CassandraDataCentre struct {
PrivateIPBroadcastForDiscovery bool `json:"privateIpBroadcastForDiscovery"`
ClientToClusterEncryption bool `json:"clientToClusterEncryption"`
ReplicationFactor int `json:"replicationFactor"`
+
+ // Adds the specified version of Debezium Connector Cassandra to the Cassandra cluster
+ // +kubebuilder:validation:MaxItems=1
+ Debezium []DebeziumCassandraSpec `json:"debezium,omitempty"`
+}
+
+type DebeziumCassandraSpec struct {
+ // KafkaVPCType defines the VPC connection type to the Kafka cluster. Only VPC_PEERED is supported.
+ KafkaVPCType string `json:"kafkaVpcType"`
+ KafkaTopicPrefix string `json:"kafkaTopicPrefix"`
+ KafkaDataCentreID string `json:"kafkaCdcId"`
+ Version string `json:"version"`
+}
+
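+// DebeziumToInstAPI converts the data centre's Debezium specs to the Instaclustr API representation.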
+func (d *CassandraDataCentre) DebeziumToInstAPI() []*models.Debezium {
+ var instDebezium []*models.Debezium
+ for _, k8sDebezium := range d.Debezium {
+ instDebezium = append(instDebezium, &models.Debezium{
+ KafkaVPCType: k8sDebezium.KafkaVPCType,
+ KafkaTopicPrefix: k8sDebezium.KafkaTopicPrefix,
+ KafkaDataCentreID: k8sDebezium.KafkaDataCentreID,
+ Version: k8sDebezium.Version,
+ })
+ }
+ return instDebezium
+}
+
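+// DebeziumEquals reports whether d and other define the same Debezium configuration, element by element.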
+func (d *CassandraDataCentre) DebeziumEquals(other *CassandraDataCentre) bool {
+ if len(d.Debezium) != len(other.Debezium) {
+ return false
+ }
+
+ for i := range d.Debezium {
+ if d.Debezium[i] != other.Debezium[i] {
+ return false
+ }
+ }
+
+ return true
}
//+kubebuilder:object:root=true
@@ -214,10 +250,6 @@ func (cs *CassandraSpec) validateUpdate(oldSpec CassandraSpec) error {
if err != nil {
return err
}
- err = validateSpark(cs.Spark, oldSpec.Spark)
- if err != nil {
- return err
- }
for _, dc := range cs.DataCentres {
err = cs.validateResizeSettings(dc.NodesNumber)
@@ -263,6 +295,10 @@ func (cs *CassandraSpec) validateDataCentresUpdate(oldSpec CassandraSpec) error
return err
}
+ if !oldDC.DebeziumEquals(newDC) {
+ return models.ErrDebeziumImmutable
+ }
+
exists = true
break
}
@@ -309,12 +345,24 @@ func (cs *CassandraSpec) FromInstAPI(iCass *models.CassandraCluster) CassandraSp
DataCentres: cs.DCsFromInstAPI(iCass.DataCentres),
LuceneEnabled: iCass.LuceneEnabled,
PasswordAndUserAuth: iCass.PasswordAndUserAuth,
- Spark: cs.SparkFromInstAPI(iCass.Spark),
BundledUseOnly: iCass.BundledUseOnly,
ResizeSettings: resizeSettingsFromInstAPI(iCass.ResizeSettings),
}
}
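+// DebeziumFromInstAPI converts Debezium configuration from the Instaclustr API representation to the k8s spec.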
+func (cs *CassandraSpec) DebeziumFromInstAPI(iDebeziums []*models.Debezium) (debeziums []DebeziumCassandraSpec) {
+ for _, iDebezium := range iDebeziums {
+ debeziums = append(debeziums, DebeziumCassandraSpec{
+ KafkaVPCType: iDebezium.KafkaVPCType,
+ KafkaTopicPrefix: iDebezium.KafkaTopicPrefix,
+ KafkaDataCentreID: iDebezium.KafkaDataCentreID,
+ Version: iDebezium.Version,
+ })
+ }
+ return
+}
+
func (cs *CassandraSpec) DCsFromInstAPI(iDCs []*models.CassandraDataCentre) (dcs []*CassandraDataCentre) {
for _, iDC := range iDCs {
dcs = append(dcs, &CassandraDataCentre{
@@ -323,15 +371,7 @@ func (cs *CassandraSpec) DCsFromInstAPI(iDCs []*models.CassandraDataCentre) (dcs
PrivateIPBroadcastForDiscovery: iDC.PrivateIPBroadcastForDiscovery,
ClientToClusterEncryption: iDC.ClientToClusterEncryption,
ReplicationFactor: iDC.ReplicationFactor,
- })
- }
- return
-}
-
-func (cs *CassandraSpec) SparkFromInstAPI(iSparks []*models.Spark) (sparks []*Spark) {
- for _, iSpark := range iSparks {
- sparks = append(sparks, &Spark{
- Version: iSpark.Version,
+ Debezium: cs.DebeziumFromInstAPI(iDC.Debezium),
})
}
return
@@ -350,7 +390,6 @@ func (cs *CassandraSpec) ToInstAPI() *models.CassandraCluster {
CassandraVersion: cs.Version,
LuceneEnabled: cs.LuceneEnabled,
PasswordAndUserAuth: cs.PasswordAndUserAuth,
- Spark: cs.SparkToInstAPI(),
DataCentres: cs.DCsToInstAPI(),
SLATier: cs.SLATier,
PrivateNetworkCluster: cs.PrivateNetworkCluster,
@@ -382,21 +421,11 @@ func (c *Cassandra) RestoreInfoToInstAPI(restoreData *CassandraRestoreFrom) any
return iRestore
}
-func (cs *CassandraSpec) SparkToInstAPI() (iSparks []*models.Spark) {
- for _, spark := range cs.Spark {
- iSparks = append(iSparks, &models.Spark{
- Version: spark.Version,
- })
- }
- return
-}
-
func (cs *CassandraSpec) IsEqual(spec CassandraSpec) bool {
return cs.Cluster.IsEqual(spec.Cluster) &&
cs.AreDCsEqual(spec.DataCentres) &&
cs.LuceneEnabled == spec.LuceneEnabled &&
cs.PasswordAndUserAuth == spec.PasswordAndUserAuth &&
- cs.IsSparkEqual(spec.Spark) &&
cs.BundledUseOnly == spec.BundledUseOnly
}
@@ -416,21 +445,8 @@ func (cs *CassandraSpec) AreDCsEqual(dcs []*CassandraDataCentre) bool {
iDC.ClientToClusterEncryption != dataCentre.ClientToClusterEncryption ||
iDC.PrivateIPBroadcastForDiscovery != dataCentre.PrivateIPBroadcastForDiscovery ||
iDC.ContinuousBackup != dataCentre.ContinuousBackup ||
- iDC.ReplicationFactor != dataCentre.ReplicationFactor {
- return false
- }
- }
-
- return true
-}
-
-func (cs *CassandraSpec) IsSparkEqual(sparks []*Spark) bool {
- if len(cs.Spark) != len(sparks) {
- return false
- }
-
- for i, spark := range sparks {
- if cs.Spark[i].Version != spark.Version {
+ iDC.ReplicationFactor != dataCentre.ReplicationFactor ||
+ !dataCentre.DebeziumEquals(iDC) {
return false
}
}
@@ -464,6 +480,7 @@ func (cdc *CassandraDataCentre) ToInstAPI() *models.CassandraDataCentre {
ContinuousBackup: cdc.ContinuousBackup,
PrivateIPBroadcastForDiscovery: cdc.PrivateIPBroadcastForDiscovery,
ReplicationFactor: cdc.ReplicationFactor,
+ Debezium: cdc.DebeziumToInstAPI(),
}
}
diff --git a/apis/clusters/v1beta1/cassandra_webhook.go b/apis/clusters/v1beta1/cassandra_webhook.go
index 4d4a6fef6..7869df417 100644
--- a/apis/clusters/v1beta1/cassandra_webhook.go
+++ b/apis/clusters/v1beta1/cassandra_webhook.go
@@ -87,10 +87,6 @@ func (cv *cassandraValidator) ValidateCreate(ctx context.Context, obj runtime.Ob
return err
}
- if len(c.Spec.Spark) > 1 {
- return fmt.Errorf("spark should not have more than 1 item")
- }
-
appVersions, err := cv.API.ListAppVersions(models.CassandraAppKind)
if err != nil {
return fmt.Errorf("cannot list versions for kind: %v, err: %w",
@@ -102,13 +98,6 @@ func (cv *cassandraValidator) ValidateCreate(ctx context.Context, obj runtime.Ob
return err
}
- for _, spark := range c.Spec.Spark {
- err = validateAppVersion(appVersions, models.SparkAppType, spark.Version)
- if err != nil {
- return err
- }
- }
-
if len(c.Spec.DataCentres) == 0 {
return fmt.Errorf("data centres field is empty")
}
diff --git a/apis/clusters/v1beta1/structs.go b/apis/clusters/v1beta1/structs.go
index f8444df55..082bd126e 100644
--- a/apis/clusters/v1beta1/structs.go
+++ b/apis/clusters/v1beta1/structs.go
@@ -89,7 +89,6 @@ type Cluster struct {
// The PCI compliance standards relate to the security of user data and transactional information.
// Can only be applied clusters provisioned on AWS_VPC, running Cassandra, Kafka, Elasticsearch and Redis.
- // PCI compliance cannot be enabled if the cluster has Spark.
PCICompliance bool `json:"pciCompliance,omitempty"`
PrivateNetworkCluster bool `json:"privateNetworkCluster,omitempty"`
diff --git a/apis/clusters/v1beta1/validation.go b/apis/clusters/v1beta1/validation.go
index 6eb8e742f..697b6ad15 100644
--- a/apis/clusters/v1beta1/validation.go
+++ b/apis/clusters/v1beta1/validation.go
@@ -170,18 +170,6 @@ func validateTwoFactorDelete(new, old []*TwoFactorDelete) error {
return nil
}
-func validateSpark(new, old []*Spark) error {
- if len(old) != len(new) {
- return models.ErrImmutableSpark
- }
- if len(old) != 0 &&
- *old[0] != *new[0] {
- return models.ErrImmutableSpark
- }
-
- return nil
-}
-
func validateTagsUpdate(new, old map[string]string) error {
if len(old) != len(new) {
return models.ErrImmutableTags
diff --git a/apis/clusters/v1beta1/zz_generated.deepcopy.go b/apis/clusters/v1beta1/zz_generated.deepcopy.go
index a0cc86ca2..e19d6b661 100644
--- a/apis/clusters/v1beta1/zz_generated.deepcopy.go
+++ b/apis/clusters/v1beta1/zz_generated.deepcopy.go
@@ -379,6 +379,11 @@ func (in *Cassandra) DeepCopyObject() runtime.Object {
func (in *CassandraDataCentre) DeepCopyInto(out *CassandraDataCentre) {
*out = *in
in.DataCentre.DeepCopyInto(&out.DataCentre)
+ if in.Debezium != nil {
+ in, out := &in.Debezium, &out.Debezium
+ *out = make([]DebeziumCassandraSpec, len(*in))
+ copy(*out, *in)
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraDataCentre.
@@ -469,17 +474,6 @@ func (in *CassandraSpec) DeepCopyInto(out *CassandraSpec) {
}
}
}
- if in.Spark != nil {
- in, out := &in.Spark, &out.Spark
- *out = make([]*Spark, len(*in))
- for i := range *in {
- if (*in)[i] != nil {
- in, out := &(*in)[i], &(*out)[i]
- *out = new(Spark)
- **out = **in
- }
- }
- }
if in.UserRefs != nil {
in, out := &in.UserRefs, &out.UserRefs
*out = make(References, len(*in))
@@ -816,6 +810,21 @@ func (in *DataCentreStatus) DeepCopy() *DataCentreStatus {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DebeziumCassandraSpec) DeepCopyInto(out *DebeziumCassandraSpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DebeziumCassandraSpec.
+func (in *DebeziumCassandraSpec) DeepCopy() *DebeziumCassandraSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(DebeziumCassandraSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DedicatedZookeeper) DeepCopyInto(out *DedicatedZookeeper) {
*out = *in
@@ -2347,21 +2356,6 @@ func (in *SharedProvisioning) DeepCopy() *SharedProvisioning {
return out
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Spark) DeepCopyInto(out *Spark) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Spark.
-func (in *Spark) DeepCopy() *Spark {
- if in == nil {
- return nil
- }
- out := new(Spark)
- in.DeepCopyInto(out)
- return out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StandardProvisioning) DeepCopyInto(out *StandardProvisioning) {
*out = *in
diff --git a/config/crd/bases/clusters.instaclustr.com_cadences.yaml b/config/crd/bases/clusters.instaclustr.com_cadences.yaml
index 1fd0ba456..f91754c22 100644
--- a/config/crd/bases/clusters.instaclustr.com_cadences.yaml
+++ b/config/crd/bases/clusters.instaclustr.com_cadences.yaml
@@ -193,7 +193,7 @@ spec:
description: The PCI compliance standards relate to the security of
user data and transactional information. Can only be applied clusters
provisioned on AWS_VPC, running Cassandra, Kafka, Elasticsearch
- and Redis. PCI compliance cannot be enabled if the cluster has Spark.
+ and Redis.
type: boolean
privateNetworkCluster:
type: boolean
diff --git a/config/crd/bases/clusters.instaclustr.com_cassandras.yaml b/config/crd/bases/clusters.instaclustr.com_cassandras.yaml
index b92d77a37..7bf382dcd 100644
--- a/config/crd/bases/clusters.instaclustr.com_cassandras.yaml
+++ b/config/crd/bases/clusters.instaclustr.com_cassandras.yaml
@@ -72,6 +72,28 @@ spec:
type: array
continuousBackup:
type: boolean
+ debezium:
+ description: Adds the specified version of Debezium Connector
+ Cassandra to the Cassandra cluster
+ items:
+ properties:
+ kafkaCdcId:
+ type: string
+ kafkaTopicPrefix:
+ type: string
+ kafkaVpcType:
+ description: KafkaVPCType defines the VPC connection type to
+ the Kafka cluster. Only VPC_PEERED is supported.
+ type: string
+ version:
+ type: string
+ required:
+ - kafkaCdcId
+ - kafkaTopicPrefix
+ - kafkaVpcType
+ - version
+ type: object
+ maxItems: 1
+ type: array
name:
type: string
network:
@@ -115,7 +137,7 @@ spec:
description: The PCI compliance standards relate to the security of
user data and transactional information. Can only be applied clusters
provisioned on AWS_VPC, running Cassandra, Kafka, Elasticsearch
- and Redis. PCI compliance cannot be enabled if the cluster has Spark.
+ and Redis.
type: boolean
privateNetworkCluster:
type: boolean
@@ -184,15 +206,6 @@ spec:
class nodes. See SLA Tier for more information. Enum: "PRODUCTION"
"NON_PRODUCTION".'
type: string
- spark:
- items:
- properties:
- version:
- type: string
- required:
- - version
- type: object
- type: array
twoFactorDelete:
items:
properties:
diff --git a/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml b/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml
index 87209b6b0..412f5bd96 100644
--- a/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml
+++ b/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml
@@ -183,7 +183,7 @@ spec:
description: The PCI compliance standards relate to the security of
user data and transactional information. Can only be applied clusters
provisioned on AWS_VPC, running Cassandra, Kafka, Elasticsearch
- and Redis. PCI compliance cannot be enabled if the cluster has Spark.
+ and Redis.
type: boolean
privateNetworkCluster:
type: boolean
diff --git a/config/crd/bases/clusters.instaclustr.com_kafkas.yaml b/config/crd/bases/clusters.instaclustr.com_kafkas.yaml
index 5fbded2a3..a2b0cfbfe 100644
--- a/config/crd/bases/clusters.instaclustr.com_kafkas.yaml
+++ b/config/crd/bases/clusters.instaclustr.com_kafkas.yaml
@@ -173,7 +173,7 @@ spec:
description: The PCI compliance standards relate to the security of
user data and transactional information. Can only be applied clusters
provisioned on AWS_VPC, running Cassandra, Kafka, Elasticsearch
- and Redis. PCI compliance cannot be enabled if the cluster has Spark.
+ and Redis.
type: boolean
privateNetworkCluster:
type: boolean
diff --git a/config/crd/bases/clusters.instaclustr.com_opensearches.yaml b/config/crd/bases/clusters.instaclustr.com_opensearches.yaml
index 9852b8b2b..a782573d4 100644
--- a/config/crd/bases/clusters.instaclustr.com_opensearches.yaml
+++ b/config/crd/bases/clusters.instaclustr.com_opensearches.yaml
@@ -166,7 +166,7 @@ spec:
description: The PCI compliance standards relate to the security of
user data and transactional information. Can only be applied clusters
provisioned on AWS_VPC, running Cassandra, Kafka, Elasticsearch
- and Redis. PCI compliance cannot be enabled if the cluster has Spark.
+ and Redis.
type: boolean
privateNetworkCluster:
type: boolean
diff --git a/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml b/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml
index 5ec93a122..208795a36 100644
--- a/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml
+++ b/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml
@@ -136,7 +136,7 @@ spec:
description: The PCI compliance standards relate to the security of
user data and transactional information. Can only be applied clusters
provisioned on AWS_VPC, running Cassandra, Kafka, Elasticsearch
- and Redis. PCI compliance cannot be enabled if the cluster has Spark.
+ and Redis.
type: boolean
pgRestoreFrom:
properties:
diff --git a/config/crd/bases/clusters.instaclustr.com_redis.yaml b/config/crd/bases/clusters.instaclustr.com_redis.yaml
index df4104371..21c7ea9cd 100644
--- a/config/crd/bases/clusters.instaclustr.com_redis.yaml
+++ b/config/crd/bases/clusters.instaclustr.com_redis.yaml
@@ -121,7 +121,7 @@ spec:
description: The PCI compliance standards relate to the security of
user data and transactional information. Can only be applied clusters
provisioned on AWS_VPC, running Cassandra, Kafka, Elasticsearch
- and Redis. PCI compliance cannot be enabled if the cluster has Spark.
+ and Redis.
type: boolean
privateNetworkCluster:
type: boolean
diff --git a/config/crd/bases/clusters.instaclustr.com_zookeepers.yaml b/config/crd/bases/clusters.instaclustr.com_zookeepers.yaml
index 412c538db..e86b681ef 100644
--- a/config/crd/bases/clusters.instaclustr.com_zookeepers.yaml
+++ b/config/crd/bases/clusters.instaclustr.com_zookeepers.yaml
@@ -106,7 +106,7 @@ spec:
description: The PCI compliance standards relate to the security of
user data and transactional information. Can only be applied clusters
provisioned on AWS_VPC, running Cassandra, Kafka, Elasticsearch
- and Redis. PCI compliance cannot be enabled if the cluster has Spark.
+ and Redis.
type: boolean
privateNetworkCluster:
type: boolean
diff --git a/config/samples/clusters_v1beta1_cassandra.yaml b/config/samples/clusters_v1beta1_cassandra.yaml
index b0e1cb1a0..c81e23181 100644
--- a/config/samples/clusters_v1beta1_cassandra.yaml
+++ b/config/samples/clusters_v1beta1_cassandra.yaml
@@ -8,7 +8,12 @@ spec:
privateNetworkCluster: false
dataCentres:
- name: "AWS_cassandra"
- region: "US_WEST_2"
+ region: "US_EAST_1"
+ debezium:
+ - kafkaVpcType: "VPC_PEERED"
+ kafkaTopicPrefix: "test"
+ kafkaCdcId: "5134aed3-7b98-4ebd-95d0-2e181bdb073b"
+ version: "2.0.1"
cloudProvider: "AWS_VPC"
continuousBackup: false
nodesNumber: 2
diff --git a/doc/clusters/cassandra.md b/doc/clusters/cassandra.md
index dfb357daf..39094bebc 100644
--- a/doc/clusters/cassandra.md
+++ b/doc/clusters/cassandra.md
@@ -37,21 +37,22 @@
| concurrency | integer | Number of concurrent nodes to resize during a resize operation. |
### CassandraDataCentreObject
-| Field | Type | Description |
-|--------------------------------|---------------------------------------------------------------------------|-------------|
-| name | string<br>**required** | A logical name for the data centre within a cluster. These names must be unique in the cluster. |
-| region | string<br>**required** | Region of the Data Centre. See the description for node size for a compatible Data Centre for a given node size. |
-| cloudProvider | string<br>**required** | Name of the cloud provider service in which the Data Centre will be provisioned.<br>**Enum**: `AWS_VPC` `GCP` `AZURE` `AZURE_AZ`. |
-| accountName | string | For customers running in their own account. Your provider account can be found on the Create Cluster page on the Instaclustr Console, or the "Provider Account" property on any existing cluster. For customers provisioning on Instaclustr's cloud provider accounts, this property may be omitted. |
-| cloudProviderSettings | Array of objects ([CloudProviderSettings](#CloudProviderSettingsObject)) | Cloud provider specific settings for the Data Centre. |
-| network | string<br>**required** | The private network address block for the Data Centre specified using CIDR address notation. The network must have a prefix length between /12 and /22 and must be part of a private address space. |
-| nodeSize | string<br>**required**<br>_mutable_ | Size of the nodes provisioned in the Data Centre. Available node sizes, see [Instaclustr API docs NodeSize](https://instaclustr.redoc.ly/Current/tag/Cassandra-Cluster-V2#paths/~1cluster-management~1v2~1resources~1applications~1cassandra~1clusters~1v2/post!path=dataCentres/nodeSize&t=request). |
-| nodesNumber | int32<br>**required**<br>_mutable_ | Total number of nodes in the Data Centre.<br>Available values: [1…5]. |
-| tags | map[string]string | List of tags to apply to the Data Centre. Tags are metadata labels which allow you to identify, categorise and filter clusters. This can be useful for grouping together clusters into applications, environments, or any category that you require.<br>**Format**:<br>tags:<br>- key: value. |
-| replicationFactor | int32<br>**required** | Default Replication factor to use for new topic. Also represents the number of racks to use when allocating nodes. |
-| continuousBackup | bool<br>**required** | Enables commitlog backups and increases the frequency of the default snapshot backups. |
-| privateIpBroadcastForDiscovery | bool<br>**required** | Enables broadcast of private IPs for auto-discovery. |
-| clientToClusterEncryption | bool<br>**required** | Enables Client ⇄ Node Encryption. |
+| Field | Type | Description |
+|--------------------------------|---------------------------------------------------------------------------|-------------|
+| name | string<br>**required** | A logical name for the data centre within a cluster. These names must be unique in the cluster. |
+| region | string<br>**required** | Region of the Data Centre. See the description for node size for a compatible Data Centre for a given node size. |
+| cloudProvider | string<br>**required** | Name of the cloud provider service in which the Data Centre will be provisioned.<br>**Enum**: `AWS_VPC` `GCP` `AZURE` `AZURE_AZ`. |
+| accountName | string | For customers running in their own account. Your provider account can be found on the Create Cluster page on the Instaclustr Console, or the "Provider Account" property on any existing cluster. For customers provisioning on Instaclustr's cloud provider accounts, this property may be omitted. |
+| cloudProviderSettings | Array of objects ([CloudProviderSettings](#CloudProviderSettingsObject)) | Cloud provider specific settings for the Data Centre. |
+| network | string<br>**required** | The private network address block for the Data Centre specified using CIDR address notation. The network must have a prefix length between /12 and /22 and must be part of a private address space. |
+| nodeSize | string<br>**required**<br>_mutable_ | Size of the nodes provisioned in the Data Centre. Available node sizes, see [Instaclustr API docs NodeSize](https://instaclustr.redoc.ly/Current/tag/Cassandra-Cluster-V2#paths/~1cluster-management~1v2~1resources~1applications~1cassandra~1clusters~1v2/post!path=dataCentres/nodeSize&t=request). |
+| nodesNumber | int32<br>**required**<br>_mutable_ | Total number of nodes in the Data Centre.<br>Available values: [1…5]. |
+| debezium | Array of objects ([DebeziumObject](#DebeziumObject)) | Adds the specified version of Debezium Connector Cassandra to the Cassandra cluster. |
+| tags | map[string]string | List of tags to apply to the Data Centre. Tags are metadata labels which allow you to identify, categorise and filter clusters. This can be useful for grouping together clusters into applications, environments, or any category that you require.<br>**Format**:<br>tags:<br>- key: value. |
+| replicationFactor | int32<br>**required** | Default Replication factor to use for new topic. Also represents the number of racks to use when allocating nodes. |
+| continuousBackup | bool<br>**required** | Enables commitlog backups and increases the frequency of the default snapshot backups. |
+| privateIpBroadcastForDiscovery | bool<br>**required** | Enables broadcast of private IPs for auto-discovery. |
+| clientToClusterEncryption | bool<br>**required** | Enables Client ⇄ Node Encryption. |
### CloudProviderSettingsObject
@@ -80,6 +81,16 @@
| customVpcId | string | Custom VPC ID to which the restored cluster will be allocated.
Either restoreToSameVpc or customVpcId must be provided. |
| customVpcNetwork | string | CIDR block in which the cluster will be allocated for a custom VPC. |
+### DebeziumObject
+
+| Field | Type | Description |
+|------------------|-----------------|--------------------------------------------------------------------------------------|
+| kafkaCdcId | string<br>**required** | ID of the Kafka cluster data centre (CDC) that Debezium will connect to. |
+| kafkaVpcType | string<br>**required** | Kafka VPC connection type. Only `VPC_PEERED` is supported. |
+| kafkaTopicPrefix | string<br>**required** | Prefix for the Kafka topics to which Debezium publishes change events. |
+| version | string<br>**required** | Version of the Debezium Connector Cassandra to add to the Cassandra cluster. |
+
+
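+An example `debezium` entry in a data centre spec (values taken from the sample manifest in `config/samples/clusters_v1beta1_cassandra.yaml`):
+
+```yaml
+dataCentres:
+  - name: "AWS_cassandra"
+    debezium:
+      - kafkaVpcType: "VPC_PEERED"
+        kafkaTopicPrefix: "test"
+        kafkaCdcId: "5134aed3-7b98-4ebd-95d0-2e181bdb073b"
+        version: "2.0.1"
+```
+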
## Cluster create flow
To create a Cassandra cluster instance you need to prepare the yaml manifest. Here is an example:
diff --git a/pkg/models/cassandra_apiv2.go b/pkg/models/cassandra_apiv2.go
index 7edc6de30..6f78f8b71 100644
--- a/pkg/models/cassandra_apiv2.go
+++ b/pkg/models/cassandra_apiv2.go
@@ -21,7 +21,6 @@ type CassandraCluster struct {
CassandraVersion string `json:"cassandraVersion"`
LuceneEnabled bool `json:"luceneEnabled"`
PasswordAndUserAuth bool `json:"passwordAndUserAuth"`
- Spark []*Spark `json:"spark,omitempty"`
DataCentres []*CassandraDataCentre `json:"dataCentres"`
Name string `json:"name"`
SLATier string `json:"slaTier"`
@@ -35,14 +34,18 @@ type CassandraCluster struct {
type CassandraDataCentre struct {
DataCentre `json:",inline"`
- ReplicationFactor int `json:"replicationFactor"`
- ContinuousBackup bool `json:"continuousBackup"`
- PrivateIPBroadcastForDiscovery bool `json:"privateIpBroadcastForDiscovery"`
- ClientToClusterEncryption bool `json:"clientToClusterEncryption"`
+ ReplicationFactor int `json:"replicationFactor"`
+ ContinuousBackup bool `json:"continuousBackup"`
+ PrivateIPBroadcastForDiscovery bool `json:"privateIpBroadcastForDiscovery"`
+ ClientToClusterEncryption bool `json:"clientToClusterEncryption"`
+ Debezium []*Debezium `json:"debezium,omitempty"`
}
-type Spark struct {
- Version string `json:"version"`
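+// Debezium describes the Debezium Connector Cassandra configuration in the Instaclustr API data centre model.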
+type Debezium struct {
+ KafkaVPCType string `json:"kafkaVpcType"`
+ KafkaTopicPrefix string `json:"kafkaTopicPrefix"`
+ KafkaDataCentreID string `json:"kafkaCdcId"`
+ Version string `json:"version"`
}
type CassandraClusterAPIUpdate struct {
diff --git a/pkg/models/errors.go b/pkg/models/errors.go
index f57193624..760cbb9e5 100644
--- a/pkg/models/errors.go
+++ b/pkg/models/errors.go
@@ -32,7 +32,6 @@ var (
ErrImmutableIntraDataCentreReplication = errors.New("intraDataCentreReplication fields are immutable")
ErrImmutableInterDataCentreReplication = errors.New("interDataCentreReplication fields are immutable")
ErrImmutableDataCentresNumber = errors.New("data centres number is immutable")
- ErrImmutableSpark = errors.New("spark field is immutable")
ErrImmutableAWSSecurityGroupFirewallRule = errors.New("awsSecurityGroupFirewallRule is immutable")
ErrImmutableTags = errors.New("tags field is immutable")
ErrTypeAssertion = errors.New("unable to assert type")
@@ -68,4 +67,5 @@ var (
ErrExposeServiceEndpointsNotCreatedYet = errors.New("expose service endpoints is not created yet")
ErrOnlySingleConcurrentResizeAvailable = errors.New("only single concurrent resize is allowed")
ErrBundledUseOnlyResourceUpdateIsNotSupported = errors.New("updating of bundled use resource is not supported")
+ ErrDebeziumImmutable = errors.New("debezium array is immutable")
)
diff --git a/pkg/models/operator.go b/pkg/models/operator.go
index 3b46ad639..5ed063f17 100644
--- a/pkg/models/operator.go
+++ b/pkg/models/operator.go
@@ -104,7 +104,6 @@ const (
PgAppType = "POSTGRESQL"
KafkaConnectAppType = "KAFKA_CONNECT"
CassandraAppType = "APACHE_CASSANDRA"
- SparkAppType = "SPARK"
DefaultPgUsernameValue = "icpostgresql"
DefaultPgDbNameValue = "postgres"