From 99ff81ae688b0ba981cc2e4e8bea79aa0e3e600a Mon Sep 17 00:00:00 2001 From: Saniya Kalamkar Date: Mon, 12 Aug 2024 16:48:59 -0700 Subject: [PATCH 01/23] Using limit instead of request to calculate usage percentage --- ui/src/utils/fetcherHooks/podsViewFetch.ts | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/ui/src/utils/fetcherHooks/podsViewFetch.ts b/ui/src/utils/fetcherHooks/podsViewFetch.ts index 9c57592428..ab9efa8b3d 100644 --- a/ui/src/utils/fetcherHooks/podsViewFetch.ts +++ b/ui/src/utils/fetcherHooks/podsViewFetch.ts @@ -49,7 +49,8 @@ export const usePodsViewFetch = ( const containers: string[] = []; const containerSpecMap = new Map(); pod?.spec?.containers?.forEach((container: any) => { - const cpu = container?.resources?.requests?.cpu; + ///const cpu = container?.resources?.requests?.cpu; + const cpu = container?.resources?.limits?.cpu; let cpuParsed: undefined | number; if (cpu) { try { @@ -58,7 +59,8 @@ export const usePodsViewFetch = ( cpuParsed = undefined; } } - const memory = container?.resources?.requests?.memory; + //const memory = container?.resources?.requests?.memory; + const memory = container?.resources?.limits?.memory; let memoryParsed: undefined | number; if (memory) { try { From 26b7635616356d1e18cf54f5a022c48359e39922 Mon Sep 17 00:00:00 2001 From: samhith-kakarla Date: Fri, 2 Aug 2024 15:40:08 -0700 Subject: [PATCH 02/23] chore: add readBytesTotal metric (#1879) Signed-off-by: Samhith Kakarla --- pkg/metrics/metrics.go | 6 +++++ pkg/reduce/data_forward.go | 13 ++++++++++ pkg/sinks/forward/forward.go | 21 ++++++++++++++++ pkg/sources/forward/data_forward.go | 39 +++++++++++++++++------------ pkg/udf/forward/forward.go | 7 +++++- 5 files changed, 69 insertions(+), 17 deletions(-) diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go index a2df127e75..31153dabe8 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/metrics.go @@ -58,6 +58,12 @@ var ( Help: "Total number of bytes read", }, 
[]string{LabelVertex, LabelPipeline, LabelVertexType, LabelVertexReplicaIndex, LabelPartitionName}) + ReadDataBytesCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Subsystem: "forwarder", + Name: "data_read_bytes_total", + Help: "Total number of Data message bytes read", + }, []string{LabelVertex, LabelPipeline, LabelVertexType, LabelVertexReplicaIndex, LabelPartitionName}) + // ReadMessagesError is used to indicate the number of errors messages read ReadMessagesError = promauto.NewCounterVec(prometheus.CounterOpts{ Subsystem: "forwarder", diff --git a/pkg/reduce/data_forward.go b/pkg/reduce/data_forward.go index fdd1c64c40..2ca50f232c 100644 --- a/pkg/reduce/data_forward.go +++ b/pkg/reduce/data_forward.go @@ -285,6 +285,7 @@ func (df *DataForward) replayForAlignedWindows(ctx context.Context, discoveredWA func (df *DataForward) forwardAChunk(ctx context.Context) { readMessages, err := df.fromBufferPartition.Read(ctx, df.opts.readBatchSize) totalBytes := 0 + dataBytes := 0 if err != nil { df.log.Errorw("Failed to read from isb", zap.Error(err)) metrics.ReadMessagesError.With(map[string]string{ @@ -367,7 +368,11 @@ func (df *DataForward) forwardAChunk(ctx context.Context) { } m.Watermark = time.Time(processorWM) totalBytes += len(m.Payload) + if m.Kind == isb.Data { + dataBytes += len(m.Payload) + } } + metrics.ReadBytesCount.With(map[string]string{ metrics.LabelVertex: df.vertexName, metrics.LabelPipeline: df.pipelineName, @@ -376,6 +381,14 @@ func (df *DataForward) forwardAChunk(ctx context.Context) { metrics.LabelPartitionName: df.fromBufferPartition.GetName(), }).Add(float64(totalBytes)) + metrics.ReadDataBytesCount.With(map[string]string{ + metrics.LabelVertex: df.vertexName, + metrics.LabelPipeline: df.pipelineName, + metrics.LabelVertexType: string(dfv1.VertexTypeReduceUDF), + metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), + metrics.LabelPartitionName: df.fromBufferPartition.GetName(), + }).Add(float64(dataBytes)) + // 
readMessages has to be written to PBQ, acked, etc. df.process(ctx, readMessages) } diff --git a/pkg/sinks/forward/forward.go b/pkg/sinks/forward/forward.go index 7522b7135d..acddb40567 100644 --- a/pkg/sinks/forward/forward.go +++ b/pkg/sinks/forward/forward.go @@ -173,6 +173,8 @@ func (df *DataForward) Start() <-chan struct{} { // buffer-not-reachable, etc., but does not include errors due to WhereTo, etc. func (df *DataForward) forwardAChunk(ctx context.Context) { start := time.Now() + totalBytes := 0 + dataBytes := 0 // There is a chance that we have read the message and the container got forcefully terminated before processing. To provide // at-least-once semantics for reading, during restart we will have to reprocess all unacknowledged messages. It is the // responsibility of the Read function to do that. @@ -214,13 +216,32 @@ func (df *DataForward) forwardAChunk(ctx context.Context) { var readOffsets = make([]isb.Offset, len(readMessages)) for idx, m := range readMessages { readOffsets[idx] = m.ReadOffset + totalBytes += len(m.Payload) if m.Kind == isb.Data { dataMessages = append(dataMessages, m) + dataBytes += len(m.Payload) } } + metrics.ReadDataMessagesCount.With(map[string]string{metrics.LabelVertex: df.vertexName, metrics.LabelPipeline: df.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeSink), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), metrics.LabelPartitionName: df.fromBufferPartition.GetName()}).Add(float64(len(dataMessages))) metrics.ReadMessagesCount.With(map[string]string{metrics.LabelVertex: df.vertexName, metrics.LabelPipeline: df.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeSink), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), metrics.LabelPartitionName: df.fromBufferPartition.GetName()}).Add(float64(len(readMessages))) + metrics.ReadBytesCount.With(map[string]string{ + metrics.LabelVertex: df.vertexName, + metrics.LabelPipeline: df.pipelineName, + 
metrics.LabelVertexType: string(dfv1.VertexTypeSink), + metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), + metrics.LabelPartitionName: df.fromBufferPartition.GetName(), + }).Add(float64(totalBytes)) + + metrics.ReadDataBytesCount.With(map[string]string{ + metrics.LabelVertex: df.vertexName, + metrics.LabelPipeline: df.pipelineName, + metrics.LabelVertexType: string(dfv1.VertexTypeSink), + metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), + metrics.LabelPartitionName: df.fromBufferPartition.GetName(), + }).Add(float64(dataBytes)) + // fetch watermark if available // TODO: make it async (concurrent and wait later) // let's track only the first element's watermark. This is important because we reassign the watermark we fetch diff --git a/pkg/sources/forward/data_forward.go b/pkg/sources/forward/data_forward.go index cce95fb726..48ff97da3a 100644 --- a/pkg/sources/forward/data_forward.go +++ b/pkg/sources/forward/data_forward.go @@ -190,6 +190,7 @@ func (df *DataForward) Start() <-chan struct{} { // buffer-not-reachable, etc., but do not include errors due to user code transformer, WhereTo, etc. func (df *DataForward) forwardAChunk(ctx context.Context) { start := time.Now() + totalBytes := 0 // There is a chance that we have read the message and the container got forcefully terminated before processing. To provide // at-least-once semantics for reading, during the restart we will have to reprocess all unacknowledged messages. It is the // responsibility of the Read function to do that. 
@@ -264,11 +265,30 @@ func (df *DataForward) forwardAChunk(ctx context.Context) { }).Add(float64(len(readMessages))) // store the offsets of the messages we read from source + + // store the offsets of the messages we read from ISB var readOffsets = make([]isb.Offset, len(readMessages)) for idx, m := range readMessages { + totalBytes += len(m.Payload) + readOffsets[idx] = m.ReadOffset } + metrics.ReadBytesCount.With(map[string]string{ + metrics.LabelVertex: df.vertexName, + metrics.LabelPipeline: df.pipelineName, + metrics.LabelVertexType: string(dfv1.VertexTypeSource), + metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), + metrics.LabelPartitionName: df.reader.GetName(), + }).Add(float64(totalBytes)) + metrics.ReadDataBytesCount.With(map[string]string{ + metrics.LabelVertex: df.vertexName, + metrics.LabelPipeline: df.pipelineName, + metrics.LabelVertexType: string(dfv1.VertexTypeSource), + metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), + metrics.LabelPartitionName: df.reader.GetName(), + }).Add(float64(totalBytes)) + // source data transformer applies filtering and assigns event time to source data, which doesn't require watermarks. // hence we assign time.UnixMilli(-1) to processorWM. 
processorWM := wmb.Watermark(time.UnixMilli(-1)) @@ -297,15 +317,9 @@ func (df *DataForward) forwardAChunk(ctx context.Context) { df.concurrentApplyTransformer(ctx, transformerCh) }() } + concurrentTransformerProcessingStart := time.Now() for idx, m := range readMessages { - metrics.ReadBytesCount.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeSource), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - metrics.LabelPartitionName: df.reader.GetName(), - }).Add(float64(len(m.Payload))) // assign watermark to the message m.Watermark = time.Time(processorWM) @@ -331,15 +345,8 @@ func (df *DataForward) forwardAChunk(ctx context.Context) { metrics.LabelPartitionName: df.reader.GetName(), }).Observe(float64(time.Since(concurrentTransformerProcessingStart).Microseconds())) } else { - for idx, m := range readMessages { - metrics.ReadBytesCount.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeSource), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - metrics.LabelPartitionName: df.reader.GetName(), - }).Add(float64(len(m.Payload))) + for idx, m := range readMessages { // assign watermark to the message m.Watermark = time.Time(processorWM) readWriteMessagePairs[idx].ReadMessage = m @@ -347,8 +354,8 @@ func (df *DataForward) forwardAChunk(ctx context.Context) { // thus, the unmodified read message will be stored as the corresponding writeMessage in readWriteMessagePairs readWriteMessagePairs[idx].WriteMessages = []*isb.WriteMessage{{Message: m.Message}} } - } + } // publish source watermark and assign IsLate attribute based on new event time. 
var writeMessages []*isb.WriteMessage var transformedReadMessages []*isb.ReadMessage diff --git a/pkg/udf/forward/forward.go b/pkg/udf/forward/forward.go index 887d2b90d6..53efc945da 100644 --- a/pkg/udf/forward/forward.go +++ b/pkg/udf/forward/forward.go @@ -177,6 +177,8 @@ func (isdf *InterStepDataForward) Start() <-chan struct{} { // buffer-not-reachable, etc., but does not include errors due to user code UDFs, WhereTo, etc. func (isdf *InterStepDataForward) forwardAChunk(ctx context.Context) { start := time.Now() + totalBytes := 0 + dataBytes := 0 // There is a chance that we have read the message and the container got forcefully terminated before processing. To provide // at-least-once semantics for reading, during restart we will have to reprocess all unacknowledged messages. It is the // responsibility of the Read function to do that. @@ -224,12 +226,16 @@ func (isdf *InterStepDataForward) forwardAChunk(ctx context.Context) { var readOffsets = make([]isb.Offset, len(readMessages)) for idx, m := range readMessages { readOffsets[idx] = m.ReadOffset + totalBytes += len(m.Payload) if m.Kind == isb.Data { dataMessages = append(dataMessages, m) + dataBytes += len(m.Payload) } } metrics.ReadDataMessagesCount.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), metrics.LabelPartitionName: isdf.fromBufferPartition.GetName()}).Add(float64(len(dataMessages))) metrics.ReadMessagesCount.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), metrics.LabelPartitionName: isdf.fromBufferPartition.GetName()}).Add(float64(len(readMessages))) + metrics.ReadBytesCount.With(map[string]string{metrics.LabelVertex: isdf.vertexName, 
metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), metrics.LabelPartitionName: isdf.fromBufferPartition.GetName()}).Add(float64(totalBytes)) + metrics.ReadDataBytesCount.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), metrics.LabelPartitionName: isdf.fromBufferPartition.GetName()}).Add(float64(dataBytes)) // fetch watermark if available // TODO: make it async (concurrent and wait later) @@ -242,7 +248,6 @@ func (isdf *InterStepDataForward) forwardAChunk(ctx context.Context) { for _, msg := range dataMessages { msg.Watermark = time.Time(processorWM) // emit message size metric - metrics.ReadBytesCount.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), metrics.LabelPartitionName: isdf.fromBufferPartition.GetName()}).Add(float64(len(msg.Payload))) } var udfResults []isb.ReadWriteMessagePair From de99a792abd14b4e802d6f5dd7fee8e18bd0d67e Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Sun, 4 Aug 2024 12:42:42 -0700 Subject: [PATCH 03/23] chore: refactor and output messages to the status when unhealthy (#1895) Signed-off-by: Derek Wang --- api/json-schema/schema.json | 3 - api/openapi-spec/swagger.json | 3 - config/advanced-install/minimal-crds.yaml | 27 +- ...w.numaproj.io_interstepbufferservices.yaml | 6 +- .../full/numaflow.numaproj.io_pipelines.yaml | 8 +- .../full/numaflow.numaproj.io_vertices.yaml | 17 +- ...w.numaproj.io_interstepbufferservices.yaml | 6 +- .../numaflow.numaproj.io_pipelines.yaml | 6 +- .../numaflow.numaproj.io_vertices.yaml | 15 +- config/install.yaml | 31 +-- 
config/namespace-install.yaml | 31 +-- docs/APIs.md | 52 ++-- pkg/apis/numaflow/v1alpha1/generated.pb.go | 138 +++++----- pkg/apis/numaflow/v1alpha1/generated.proto | 26 +- pkg/apis/numaflow/v1alpha1/isbsvc_types.go | 76 +++--- .../numaflow/v1alpha1/isbsvc_types_test.go | 2 +- .../numaflow/v1alpha1/openapi_generated.go | 73 +++-- pkg/apis/numaflow/v1alpha1/pipeline_types.go | 85 ++++-- .../numaflow/v1alpha1/pipeline_types_test.go | 53 +++- pkg/apis/numaflow/v1alpha1/vertex_types.go | 36 +-- .../numaflow/v1alpha1/vertex_types_test.go | 4 +- .../v1alpha1/zz_generated.deepcopy.go | 2 +- pkg/reconciler/cmd/start.go | 10 - .../isbsvc/installer/installer_test.go | 2 +- pkg/reconciler/isbsvc/installer/jetstream.go | 8 +- .../isbsvc/installer/native_redis.go | 8 +- pkg/reconciler/isbsvc/installer/watcher.go | 36 --- .../isbsvc/installer/watcher_test.go | 56 ---- pkg/reconciler/pipeline/controller.go | 63 +++-- pkg/reconciler/pipeline/controller_test.go | 2 +- pkg/reconciler/pipeline/watcher.go | 62 ----- pkg/reconciler/pipeline/watcher_test.go | 133 --------- pkg/reconciler/util.go | 135 +++++++++ pkg/reconciler/util_test.go | 256 ++++++++++++++++++ pkg/reconciler/vertex/controller.go | 53 ++-- pkg/reconciler/vertex/controller_test.go | 39 --- pkg/reconciler/vertex/watcher.go | 32 --- pkg/reconciler/vertex/watcher_test.go | 51 ---- pkg/sources/kafka/handler_test.go | 4 +- pkg/sources/kafka/reader_test.go | 4 +- 40 files changed, 835 insertions(+), 819 deletions(-) delete mode 100644 pkg/reconciler/isbsvc/installer/watcher.go delete mode 100644 pkg/reconciler/isbsvc/installer/watcher_test.go delete mode 100644 pkg/reconciler/pipeline/watcher.go delete mode 100644 pkg/reconciler/pipeline/watcher_test.go create mode 100644 pkg/reconciler/util.go create mode 100644 pkg/reconciler/util_test.go delete mode 100644 pkg/reconciler/vertex/watcher.go delete mode 100644 pkg/reconciler/vertex/watcher_test.go diff --git a/api/json-schema/schema.json b/api/json-schema/schema.json index 
672241cef4..410d4ad9db 100644 --- a/api/json-schema/schema.json +++ b/api/json-schema/schema.json @@ -18627,7 +18627,6 @@ "type": "string" }, "observedGeneration": { - "description": "ObservedGeneration stores the generation value observed by the controller.", "format": "int64", "type": "integer" }, @@ -19316,7 +19315,6 @@ "type": "string" }, "observedGeneration": { - "description": "ObservedGeneration stores the generation value observed by the controller.", "format": "int64", "type": "integer" }, @@ -20188,7 +20186,6 @@ "type": "string" }, "observedGeneration": { - "description": "ObservedGeneration stores the generation value observed by the controller.", "format": "int64", "type": "integer" }, diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 54a84b6a45..812ab63a65 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -18623,7 +18623,6 @@ "type": "string" }, "observedGeneration": { - "description": "ObservedGeneration stores the generation value observed by the controller.", "type": "integer", "format": "int64" }, @@ -19303,7 +19302,6 @@ "type": "string" }, "observedGeneration": { - "description": "ObservedGeneration stores the generation value observed by the controller.", "type": "integer", "format": "int64" }, @@ -20170,7 +20168,6 @@ "type": "string" }, "observedGeneration": { - "description": "ObservedGeneration stores the generation value observed by the controller.", "type": "integer", "format": "int64" }, diff --git a/config/advanced-install/minimal-crds.yaml b/config/advanced-install/minimal-crds.yaml index cde33e4c09..1d72f226c9 100644 --- a/config/advanced-install/minimal-crds.yaml +++ b/config/advanced-install/minimal-crds.yaml @@ -20,12 +20,12 @@ spec: - jsonPath: .status.phase name: Phase type: string - - jsonPath: .status.message - name: Message - type: string - jsonPath: .metadata.creationTimestamp name: Age type: date + - jsonPath: .status.message + name: Message + type: string name: 
v1alpha1 schema: openAPIV3Schema: @@ -69,9 +69,6 @@ spec: - jsonPath: .status.phase name: Phase type: string - - jsonPath: .status.message - name: Message - type: string - jsonPath: .status.vertexCount name: Vertices type: integer @@ -98,6 +95,9 @@ spec: - jsonPath: .metadata.creationTimestamp name: Age type: date + - jsonPath: .status.message + name: Message + type: string name: v1alpha1 schema: openAPIV3Schema: @@ -141,18 +141,21 @@ spec: - jsonPath: .status.phase name: Phase type: string - - jsonPath: .status.reason - name: Reason - type: string - - jsonPath: .status.message - name: Message - type: string - jsonPath: .spec.replicas name: Desired type: string - jsonPath: .status.replicas name: Current type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .status.reason + name: Reason + type: string + - jsonPath: .status.message + name: Message + type: string name: v1alpha1 schema: openAPIV3Schema: diff --git a/config/base/crds/full/numaflow.numaproj.io_interstepbufferservices.yaml b/config/base/crds/full/numaflow.numaproj.io_interstepbufferservices.yaml index 472e2f0100..c09fb798f4 100644 --- a/config/base/crds/full/numaflow.numaproj.io_interstepbufferservices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_interstepbufferservices.yaml @@ -24,12 +24,12 @@ spec: - jsonPath: .status.phase name: Phase type: string - - jsonPath: .status.message - name: Message - type: string - jsonPath: .metadata.creationTimestamp name: Age type: date + - jsonPath: .status.message + name: Message + type: string name: v1alpha1 schema: openAPIV3Schema: diff --git a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml index 3e8feaf631..0f8f7d3c77 100644 --- a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml @@ -21,9 +21,6 @@ spec: - jsonPath: .status.phase name: Phase type: string - - jsonPath: 
.status.message - name: Message - type: string - jsonPath: .status.vertexCount name: Vertices type: integer @@ -50,6 +47,9 @@ spec: - jsonPath: .metadata.creationTimestamp name: Age type: date + - jsonPath: .status.message + name: Message + type: string name: v1alpha1 schema: openAPIV3Schema: @@ -116,7 +116,6 @@ spec: enum: - "" - Running - - Succeeded - Failed - Pausing - Paused @@ -9739,7 +9738,6 @@ spec: enum: - "" - Running - - Succeeded - Failed - Pausing - Paused diff --git a/config/base/crds/full/numaflow.numaproj.io_vertices.yaml b/config/base/crds/full/numaflow.numaproj.io_vertices.yaml index b192548491..e832cec26a 100644 --- a/config/base/crds/full/numaflow.numaproj.io_vertices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_vertices.yaml @@ -21,18 +21,21 @@ spec: - jsonPath: .status.phase name: Phase type: string - - jsonPath: .status.reason - name: Reason - type: string - - jsonPath: .status.message - name: Message - type: string - jsonPath: .spec.replicas name: Desired type: string - jsonPath: .status.replicas name: Current type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .status.reason + name: Reason + type: string + - jsonPath: .status.message + name: Message + type: string name: v1alpha1 schema: openAPIV3Schema: @@ -5455,9 +5458,7 @@ spec: phase: enum: - "" - - Pending - Running - - Succeeded - Failed type: string reason: diff --git a/config/base/crds/minimal/numaflow.numaproj.io_interstepbufferservices.yaml b/config/base/crds/minimal/numaflow.numaproj.io_interstepbufferservices.yaml index 254786650d..19b2d1788f 100644 --- a/config/base/crds/minimal/numaflow.numaproj.io_interstepbufferservices.yaml +++ b/config/base/crds/minimal/numaflow.numaproj.io_interstepbufferservices.yaml @@ -20,12 +20,12 @@ spec: - jsonPath: .status.phase name: Phase type: string - - jsonPath: .status.message - name: Message - type: string - jsonPath: .metadata.creationTimestamp name: Age type: date + - jsonPath: 
.status.message + name: Message + type: string name: v1alpha1 schema: openAPIV3Schema: diff --git a/config/base/crds/minimal/numaflow.numaproj.io_pipelines.yaml b/config/base/crds/minimal/numaflow.numaproj.io_pipelines.yaml index ddf9c94236..f59789750f 100644 --- a/config/base/crds/minimal/numaflow.numaproj.io_pipelines.yaml +++ b/config/base/crds/minimal/numaflow.numaproj.io_pipelines.yaml @@ -17,9 +17,6 @@ spec: - jsonPath: .status.phase name: Phase type: string - - jsonPath: .status.message - name: Message - type: string - jsonPath: .status.vertexCount name: Vertices type: integer @@ -46,6 +43,9 @@ spec: - jsonPath: .metadata.creationTimestamp name: Age type: date + - jsonPath: .status.message + name: Message + type: string name: v1alpha1 schema: openAPIV3Schema: diff --git a/config/base/crds/minimal/numaflow.numaproj.io_vertices.yaml b/config/base/crds/minimal/numaflow.numaproj.io_vertices.yaml index 1ba87503cd..b0f1c1ba0f 100644 --- a/config/base/crds/minimal/numaflow.numaproj.io_vertices.yaml +++ b/config/base/crds/minimal/numaflow.numaproj.io_vertices.yaml @@ -17,18 +17,21 @@ spec: - jsonPath: .status.phase name: Phase type: string - - jsonPath: .status.reason - name: Reason - type: string - - jsonPath: .status.message - name: Message - type: string - jsonPath: .spec.replicas name: Desired type: string - jsonPath: .status.replicas name: Current type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .status.reason + name: Reason + type: string + - jsonPath: .status.message + name: Message + type: string name: v1alpha1 schema: openAPIV3Schema: diff --git a/config/install.yaml b/config/install.yaml index fe76099057..1af95e16bf 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -23,12 +23,12 @@ spec: - jsonPath: .status.phase name: Phase type: string - - jsonPath: .status.message - name: Message - type: string - jsonPath: .metadata.creationTimestamp name: Age type: date + - jsonPath: .status.message + name: 
Message + type: string name: v1alpha1 schema: openAPIV3Schema: @@ -2633,9 +2633,6 @@ spec: - jsonPath: .status.phase name: Phase type: string - - jsonPath: .status.message - name: Message - type: string - jsonPath: .status.vertexCount name: Vertices type: integer @@ -2662,6 +2659,9 @@ spec: - jsonPath: .metadata.creationTimestamp name: Age type: date + - jsonPath: .status.message + name: Message + type: string name: v1alpha1 schema: openAPIV3Schema: @@ -2728,7 +2728,6 @@ spec: enum: - "" - Running - - Succeeded - Failed - Pausing - Paused @@ -12351,7 +12350,6 @@ spec: enum: - "" - Running - - Succeeded - Failed - Pausing - Paused @@ -12409,18 +12407,21 @@ spec: - jsonPath: .status.phase name: Phase type: string - - jsonPath: .status.reason - name: Reason - type: string - - jsonPath: .status.message - name: Message - type: string - jsonPath: .spec.replicas name: Desired type: string - jsonPath: .status.replicas name: Current type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .status.reason + name: Reason + type: string + - jsonPath: .status.message + name: Message + type: string name: v1alpha1 schema: openAPIV3Schema: @@ -17843,9 +17844,7 @@ spec: phase: enum: - "" - - Pending - Running - - Succeeded - Failed type: string reason: diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index 5ebdcc3712..56526100f3 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -23,12 +23,12 @@ spec: - jsonPath: .status.phase name: Phase type: string - - jsonPath: .status.message - name: Message - type: string - jsonPath: .metadata.creationTimestamp name: Age type: date + - jsonPath: .status.message + name: Message + type: string name: v1alpha1 schema: openAPIV3Schema: @@ -2633,9 +2633,6 @@ spec: - jsonPath: .status.phase name: Phase type: string - - jsonPath: .status.message - name: Message - type: string - jsonPath: .status.vertexCount name: Vertices type: integer @@ -2662,6 +2659,9 @@ 
spec: - jsonPath: .metadata.creationTimestamp name: Age type: date + - jsonPath: .status.message + name: Message + type: string name: v1alpha1 schema: openAPIV3Schema: @@ -2728,7 +2728,6 @@ spec: enum: - "" - Running - - Succeeded - Failed - Pausing - Paused @@ -12351,7 +12350,6 @@ spec: enum: - "" - Running - - Succeeded - Failed - Pausing - Paused @@ -12409,18 +12407,21 @@ spec: - jsonPath: .status.phase name: Phase type: string - - jsonPath: .status.reason - name: Reason - type: string - - jsonPath: .status.message - name: Message - type: string - jsonPath: .spec.replicas name: Desired type: string - jsonPath: .status.replicas name: Current type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .status.reason + name: Reason + type: string + - jsonPath: .status.message + name: Message + type: string name: v1alpha1 schema: openAPIV3Schema: @@ -17843,9 +17844,7 @@ spec: phase: enum: - "" - - Pending - Running - - Succeeded - Failed type: string reason: diff --git a/docs/APIs.md b/docs/APIs.md index 3b6632fa1c..8b42812244 100644 --- a/docs/APIs.md +++ b/docs/APIs.md @@ -4081,12 +4081,6 @@ BufferServiceConfig -

- -ObservedGeneration stores the generation value observed by the -controller. -

- @@ -6681,12 +6675,6 @@ Kubernetes meta/v1.Time -

- -ObservedGeneration stores the generation value observed by the -controller. -

- @@ -9876,13 +9864,17 @@ Description -phase
- VertexPhase - +Status
+ Status +

+ +(Members of Status are embedded into this type.) +

+ @@ -9891,7 +9883,9 @@ Description -reason
string +phase
+ VertexPhase + @@ -9904,7 +9898,7 @@ Description -message
string +replicas
uint32 @@ -9917,7 +9911,7 @@ Description -replicas
uint32 +selector
string @@ -9930,7 +9924,7 @@ Description -selector
string +reason
string @@ -9943,9 +9937,7 @@ Description -lastScaledAt
- -Kubernetes meta/v1.Time +message
string @@ -9958,17 +9950,13 @@ Kubernetes meta/v1.Time -Status
- Status +lastScaledAt
+ +Kubernetes meta/v1.Time -

- -(Members of Status are embedded into this type.) -

- @@ -9982,12 +9970,6 @@ Kubernetes meta/v1.Time -

- -ObservedGeneration stores the generation value observed by the -controller. -

- diff --git a/pkg/apis/numaflow/v1alpha1/generated.pb.go b/pkg/apis/numaflow/v1alpha1/generated.pb.go index a7ec5071ed..ec8ed47f5c 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.pb.go +++ b/pkg/apis/numaflow/v1alpha1/generated.pb.go @@ -2501,7 +2501,7 @@ func init() { } var fileDescriptor_9d0d1b17d3865563 = []byte{ - // 7214 bytes of a gzipped FileDescriptorProto + // 7203 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5b, 0x6c, 0x24, 0xc7, 0x75, 0xa8, 0xe6, 0x45, 0xce, 0x9c, 0x21, 0xb9, 0xbb, 0xb5, 0xd2, 0x8a, 0x4b, 0xad, 0x76, 0xd6, 0xed, 0x2b, 0xdd, 0xf5, 0xb5, 0x4d, 0x5e, 0xf1, 0x4a, 0x96, 0xec, 0x6b, 0x5b, 0xe2, 0x90, 0x4b, @@ -2933,26 +2933,26 @@ var fileDescriptor_9d0d1b17d3865563 = []byte{ 0xbe, 0xf8, 0x65, 0xa5, 0xf2, 0x7d, 0x2f, 0x2b, 0xed, 0x41, 0x6d, 0xc7, 0x73, 0xbb, 0x3c, 0x6b, 0x4e, 0xfe, 0x6f, 0xce, 0xb5, 0x1c, 0x32, 0x25, 0xfa, 0xc7, 0xb8, 0x48, 0xb4, 0x2e, 0x2b, 0xfc, 0x18, 0x91, 0xe2, 0x4e, 0x69, 0x57, 0x50, 0x1d, 0x3b, 0x49, 0xaa, 0xe1, 0x59, 0xb2, 0x29, 0xb0, - 0xa3, 0x22, 0x93, 0xcc, 0x94, 0x1b, 0x7f, 0x67, 0x32, 0xe5, 0xb4, 0x5f, 0x2c, 0xab, 0x03, 0xac, - 0x95, 0xaa, 0xab, 0x52, 0x18, 0x52, 0x57, 0x45, 0x16, 0xd8, 0x8b, 0xe7, 0x74, 0x3d, 0x0d, 0x63, - 0x1e, 0xd5, 0x7d, 0x59, 0x7a, 0x24, 0x96, 0x2f, 0x81, 0xbc, 0x15, 0x65, 0x6f, 0x3c, 0xf7, 0xab, - 0xf8, 0x80, 0xdc, 0xaf, 0xf7, 0xc5, 0x16, 0x88, 0x48, 0xee, 0x0d, 0xf7, 0x7a, 0xc6, 0x22, 0xe1, - 0xd9, 0x15, 0xf2, 0x7f, 0xdf, 0x2b, 0xe9, 0xec, 0x0a, 0xf9, 0x9f, 0xec, 0x21, 0x04, 0x31, 0x61, - 0xc2, 0xd6, 0xfd, 0x80, 0x87, 0xed, 0xcc, 0x85, 0x60, 0x84, 0xc4, 0xb2, 0x70, 0x1b, 0xad, 0xc5, - 0xf0, 0x60, 0x02, 0x6b, 0x2c, 0x0f, 0x6f, 0xfc, 0x74, 0xf3, 0xf0, 0xb2, 0xf3, 0xb2, 0xaa, 0x23, - 0xe5, 0x65, 0x1d, 0x94, 0x20, 0x65, 0x3b, 0xfd, 0x2c, 0x42, 0xf1, 0x5f, 0x2a, 0x42, 0xf1, 0x76, - 0x11, 0xa2, 0x83, 0xe0, 0x98, 0xa9, 0x16, 0xaf, 0x42, 0xb5, 0xab, 0xdf, 0x5d, 0xa2, 0xb6, 0xbe, - 0x9f, 0xe7, 0xcf, 0x4e, 0xd6, 0x25, 0x0e, 0x0c, 
0xb1, 0x11, 0x1f, 0xc0, 0x0a, 0x6b, 0xdf, 0xe5, - 0xf6, 0x38, 0x47, 0x65, 0xf4, 0x84, 0x4f, 0x2b, 0x7a, 0xc6, 0x18, 0x19, 0xed, 0xcf, 0x8b, 0x20, - 0x8b, 0x24, 0x12, 0x0a, 0x95, 0x1d, 0xeb, 0x2e, 0x35, 0x73, 0xa7, 0x1d, 0xc6, 0xfe, 0x82, 0x4a, - 0xb8, 0xd4, 0x79, 0x03, 0x0a, 0xec, 0xdc, 0x57, 0x2a, 0x42, 0x24, 0x92, 0x7f, 0x39, 0x7c, 0xa5, - 0xf1, 0x50, 0x8b, 0xf4, 0x95, 0x8a, 0x26, 0x54, 0x34, 0x84, 0x6b, 0x96, 0xc7, 0xa9, 0x25, 0x4b, - 0xf3, 0xb8, 0x66, 0x63, 0xf1, 0x6e, 0xe5, 0x9a, 0xf5, 0xc5, 0x05, 0x64, 0x49, 0xa3, 0xf9, 0xc9, - 0x6f, 0x7d, 0xf7, 0xf2, 0x23, 0xdf, 0xfe, 0xee, 0xe5, 0x47, 0xbe, 0xf3, 0xdd, 0xcb, 0x8f, 0x7c, - 0xfe, 0xf0, 0x72, 0xe1, 0x5b, 0x87, 0x97, 0x0b, 0xdf, 0x3e, 0xbc, 0x5c, 0xf8, 0xce, 0xe1, 0xe5, - 0xc2, 0xdf, 0x1d, 0x5e, 0x2e, 0xfc, 0xca, 0xdf, 0x5f, 0x7e, 0xe4, 0x13, 0xcf, 0x47, 0x53, 0x98, - 0x53, 0x53, 0x98, 0x53, 0x04, 0xe7, 0x7a, 0x9d, 0xf6, 0x1c, 0x9b, 0x42, 0xd4, 0xa2, 0xa6, 0xf0, - 0x9f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x16, 0xbc, 0x15, 0xa3, 0x10, 0x86, 0x00, 0x00, + 0xa3, 0x22, 0x93, 0xcc, 0x94, 0x1b, 0x7f, 0x67, 0x32, 0xe5, 0xb4, 0x5f, 0x2c, 0xab, 0x03, 0xec, + 0xa1, 0x2b, 0xe0, 0x22, 0x2b, 0xf9, 0xc5, 0x93, 0xc7, 0xde, 0x17, 0x5b, 0x36, 0x22, 0xe5, 0x37, + 0x3c, 0x01, 0x32, 0x96, 0x0e, 0xcf, 0xb9, 0x90, 0xff, 0x06, 0x3f, 0x90, 0x73, 0x21, 0xff, 0xa9, + 0x3d, 0x84, 0x20, 0x4f, 0xc3, 0x98, 0x47, 0x75, 0xdf, 0x75, 0xd2, 0x77, 0x53, 0x91, 0xb7, 0xa2, + 0xec, 0x8d, 0x27, 0xb0, 0x8d, 0x3d, 0x20, 0x81, 0xcd, 0x84, 0x09, 0x5b, 0xf7, 0x03, 0x1e, 0x1f, + 0x34, 0x17, 0xd4, 0x3f, 0x57, 0x1c, 0x27, 0x83, 0x2d, 0xdc, 0xaf, 0x6b, 0x31, 0x3c, 0x98, 0xc0, + 0x3a, 0x24, 0x2f, 0xab, 0x3a, 0x52, 0x5e, 0xd6, 0x41, 0x09, 0x52, 0xb6, 0xd3, 0xcf, 0x22, 0x14, + 0xff, 0xa5, 0x22, 0x14, 0x6f, 0x17, 0x21, 0x3a, 0x08, 0x8e, 0x99, 0x6a, 0xf1, 0x2a, 0x54, 0xbb, + 0xfa, 0xdd, 0x25, 0x6a, 0xeb, 0xfb, 0x79, 0xfe, 0xec, 0x64, 0x5d, 0xe2, 0xc0, 0x10, 0x1b, 0xf1, + 0x01, 0xac, 0xb0, 0xf6, 0x5d, 0x6e, 0x8f, 0x73, 0x54, 0x46, 0x4f, 0xf8, 0xb4, 0xa2, 0x67, 0x8c, + 
0x91, 0xd1, 0xfe, 0xbc, 0x08, 0xb2, 0x48, 0x22, 0xa1, 0x50, 0xd9, 0xb1, 0xee, 0x52, 0x33, 0x77, + 0xda, 0x61, 0xec, 0x2f, 0xa8, 0x84, 0x4b, 0x9d, 0x37, 0xa0, 0xc0, 0xce, 0x7d, 0xa5, 0x22, 0x44, + 0x22, 0xf9, 0x97, 0xc3, 0x57, 0x1a, 0x0f, 0xb5, 0x48, 0x5f, 0xa9, 0x68, 0x42, 0x45, 0x43, 0xb8, + 0x66, 0x79, 0x9c, 0x5a, 0xb2, 0x34, 0x8f, 0x6b, 0x36, 0x16, 0xef, 0x56, 0xae, 0x59, 0x5f, 0x5c, + 0x40, 0x96, 0x34, 0x9a, 0x9f, 0xfc, 0xd6, 0x77, 0x2f, 0x3f, 0xf2, 0xed, 0xef, 0x5e, 0x7e, 0xe4, + 0x3b, 0xdf, 0xbd, 0xfc, 0xc8, 0xe7, 0x0f, 0x2f, 0x17, 0xbe, 0x75, 0x78, 0xb9, 0xf0, 0xed, 0xc3, + 0xcb, 0x85, 0xef, 0x1c, 0x5e, 0x2e, 0xfc, 0xdd, 0xe1, 0xe5, 0xc2, 0xaf, 0xfc, 0xfd, 0xe5, 0x47, + 0x3e, 0xf1, 0x7c, 0x34, 0x85, 0x39, 0x35, 0x85, 0x39, 0x45, 0x70, 0xae, 0xd7, 0x69, 0xcf, 0xb1, + 0x29, 0x44, 0x2d, 0x6a, 0x0a, 0xff, 0x19, 0x00, 0x00, 0xff, 0xff, 0x2c, 0xe6, 0x2a, 0xfb, 0x10, + 0x86, 0x00, 0x00, } func (m *AbstractPodTemplate) Marshal() (dAtA []byte, err error) { @@ -8066,7 +8066,7 @@ func (m *VertexStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x40 { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.LastScaledAt.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -8075,38 +8075,38 @@ func (m *VertexStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x3a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 i -= len(m.Reason) copy(dAtA[i:], m.Reason) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) i-- - dAtA[i] = 0x32 + dAtA[i] = 0x2a i -= len(m.Selector) copy(dAtA[i:], m.Selector) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Selector))) i-- - dAtA[i] = 0x2a - { - size, err := m.LastScaledAt.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x22 i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) 
i-- dAtA[i] = 0x18 - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x12 i -= len(m.Phase) copy(dAtA[i:], m.Phase) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) i-- + dAtA[i] = 0x12 + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0xa return len(dAtA) - i, nil } @@ -10186,18 +10186,18 @@ func (m *VertexStatus) Size() (n int) { } var l int _ = l - l = len(m.Phase) + l = m.Status.Size() n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) + l = len(m.Phase) n += 1 + l + sovGenerated(uint64(l)) n += 1 + sovGenerated(uint64(m.Replicas)) - l = m.LastScaledAt.Size() - n += 1 + l + sovGenerated(uint64(l)) l = len(m.Selector) n += 1 + l + sovGenerated(uint64(l)) l = len(m.Reason) n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastScaledAt.Size() n += 1 + l + sovGenerated(uint64(l)) n += 1 + sovGenerated(uint64(m.ObservedGeneration)) return n @@ -11553,13 +11553,13 @@ func (this *VertexStatus) String() string { return "nil" } s := strings.Join([]string{`&VertexStatus{`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "Status", "Status", 1), `&`, ``, 1) + `,`, `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `LastScaledAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastScaledAt), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, `Selector:` + fmt.Sprintf("%v", this.Selector) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "Status", "Status", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastScaledAt:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastScaledAt), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `}`, }, "") @@ -27635,9 +27635,9 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -27647,27 +27647,28 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Phase = VertexPhase(dAtA[iNdEx:postIndex]) + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -27695,7 +27696,7 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.Phase = VertexPhase(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { @@ -27718,9 +27719,9 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error { } case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastScaledAt", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", 
wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -27730,28 +27731,27 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastScaledAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Selector = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -27779,11 +27779,11 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Selector = string(dAtA[iNdEx:postIndex]) + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -27811,11 +27811,11 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(dAtA[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastScaledAt", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -27842,7 +27842,7 @@ func (m 
*VertexStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LastScaledAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex diff --git a/pkg/apis/numaflow/v1alpha1/generated.proto b/pkg/apis/numaflow/v1alpha1/generated.proto index 69851f9ded..b3dd64f6b8 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.proto +++ b/pkg/apis/numaflow/v1alpha1/generated.proto @@ -598,8 +598,8 @@ message IdleSource { // +kubebuilder:subresource:status // +kubebuilder:printcolumn:name="Type",type=string,JSONPath=`.status.type` // +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` -// +kubebuilder:printcolumn:name="Message",type=string,JSONPath=`.status.message` // +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:printcolumn:name="Message",type=string,JSONPath=`.status.message` // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:openapi-gen=true message InterStepBufferService { @@ -636,7 +636,6 @@ message InterStepBufferServiceStatus { optional string type = 5; - // ObservedGeneration stores the generation value observed by the controller. 
optional int64 observedGeneration = 6; } @@ -925,7 +924,6 @@ message PersistenceStrategy { // +kubebuilder:resource:shortName=pl // +kubebuilder:subresource:status // +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` -// +kubebuilder:printcolumn:name="Message",type=string,JSONPath=`.status.message` // +kubebuilder:printcolumn:name="Vertices",type=integer,JSONPath=`.status.vertexCount` // +kubebuilder:printcolumn:name="Sources",type=integer,JSONPath=`.status.sourceCount`,priority=10 // +kubebuilder:printcolumn:name="Sinks",type=integer,JSONPath=`.status.sinkCount`,priority=10 @@ -933,6 +931,7 @@ message PersistenceStrategy { // +kubebuilder:printcolumn:name="Map UDFs",type=integer,JSONPath=`.status.mapUDFCount`,priority=10 // +kubebuilder:printcolumn:name="Reduce UDFs",type=integer,JSONPath=`.status.reduceUDFCount`,priority=10 // +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:printcolumn:name="Message",type=string,JSONPath=`.status.message` // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:openapi-gen=true message Pipeline { @@ -1034,7 +1033,6 @@ message PipelineStatus { optional uint32 reduceUDFCount = 10; - // ObservedGeneration stores the generation value observed by the controller. 
optional int64 observedGeneration = 11; } @@ -1394,10 +1392,11 @@ message UDTransformer { // +kubebuilder:subresource:status // +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector // +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` -// +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.reason` -// +kubebuilder:printcolumn:name="Message",type=string,JSONPath=`.status.message` // +kubebuilder:printcolumn:name="Desired",type=string,JSONPath=`.spec.replicas` // +kubebuilder:printcolumn:name="Current",type=string,JSONPath=`.status.replicas` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.reason` +// +kubebuilder:printcolumn:name="Message",type=string,JSONPath=`.status.message` // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:openapi-gen=true message Vertex { @@ -1473,21 +1472,20 @@ message VertexSpec { } message VertexStatus { - optional string phase = 1; - - optional string reason = 6; + optional Status status = 1; - optional string message = 2; + optional string phase = 2; optional uint32 replicas = 3; - optional string selector = 5; + optional string selector = 4; + + optional string reason = 5; - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaledAt = 4; + optional string message = 6; - optional Status status = 7; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaledAt = 7; - // ObservedGeneration stores the generation value observed by the controller. 
optional int64 observedGeneration = 8; } diff --git a/pkg/apis/numaflow/v1alpha1/isbsvc_types.go b/pkg/apis/numaflow/v1alpha1/isbsvc_types.go index a1f7db28bd..1c7424274a 100644 --- a/pkg/apis/numaflow/v1alpha1/isbsvc_types.go +++ b/pkg/apis/numaflow/v1alpha1/isbsvc_types.go @@ -54,8 +54,8 @@ const ( // +kubebuilder:subresource:status // +kubebuilder:printcolumn:name="Type",type=string,JSONPath=`.status.type` // +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` -// +kubebuilder:printcolumn:name="Message",type=string,JSONPath=`.status.message` // +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:printcolumn:name="Message",type=string,JSONPath=`.status.message` // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:openapi-gen=true type InterStepBufferService struct { @@ -87,73 +87,73 @@ type BufferServiceConfig struct { } type InterStepBufferServiceStatus struct { - Status `json:",inline" protobuf:"bytes,1,opt,name=status"` - Phase ISBSvcPhase `json:"phase,omitempty" protobuf:"bytes,2,opt,name=phase,casttype=ISBSvcPhase"` - Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` - Config BufferServiceConfig `json:"config,omitempty" protobuf:"bytes,4,opt,name=config"` - Type ISBSvcType `json:"type,omitempty" protobuf:"bytes,5,opt,name=type"` - // ObservedGeneration stores the generation value observed by the controller. 
- ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,6,opt,name=observedGeneration"` + Status `json:",inline" protobuf:"bytes,1,opt,name=status"` + Phase ISBSvcPhase `json:"phase,omitempty" protobuf:"bytes,2,opt,name=phase,casttype=ISBSvcPhase"` + Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` + Config BufferServiceConfig `json:"config,omitempty" protobuf:"bytes,4,opt,name=config"` + Type ISBSvcType `json:"type,omitempty" protobuf:"bytes,5,opt,name=type"` + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,6,opt,name=observedGeneration"` } -func (isbsvc *InterStepBufferServiceStatus) SetPhase(phase ISBSvcPhase, msg string) { - isbsvc.Phase = phase - isbsvc.Message = msg +func (iss *InterStepBufferServiceStatus) SetPhase(phase ISBSvcPhase, msg string) { + iss.Phase = phase + iss.Message = msg } -func (isbsvc *InterStepBufferServiceStatus) SetType(typ ISBSvcType) { - isbsvc.Type = typ +func (iss *InterStepBufferServiceStatus) SetType(typ ISBSvcType) { + iss.Type = typ } // InitConditions sets conditions to Unknown state. -func (isbsvc *InterStepBufferServiceStatus) InitConditions() { - isbsvc.InitializeConditions(ISBSvcConditionConfigured, ISBSvcConditionDeployed, ISBSvcConditionChildrenResourcesHealthy) - isbsvc.SetPhase(ISBSvcPhasePending, "") +func (iss *InterStepBufferServiceStatus) InitConditions() { + iss.InitializeConditions(ISBSvcConditionConfigured, ISBSvcConditionDeployed, ISBSvcConditionChildrenResourcesHealthy) + iss.SetPhase(ISBSvcPhasePending, "") } // MarkConfigured set the InterStepBufferService has valid configuration. 
-func (isbsvc *InterStepBufferServiceStatus) MarkConfigured() { - isbsvc.MarkTrue(ISBSvcConditionConfigured) - isbsvc.SetPhase(ISBSvcPhasePending, "") +func (iss *InterStepBufferServiceStatus) MarkConfigured() { + iss.MarkTrue(ISBSvcConditionConfigured) + iss.SetPhase(ISBSvcPhasePending, "") } // MarkNotConfigured the InterStepBufferService has configuration. -func (isbsvc *InterStepBufferServiceStatus) MarkNotConfigured(reason, message string) { - isbsvc.MarkFalse(ISBSvcConditionConfigured, reason, message) - isbsvc.SetPhase(ISBSvcPhaseFailed, message) +func (iss *InterStepBufferServiceStatus) MarkNotConfigured(reason, message string) { + iss.MarkFalse(ISBSvcConditionConfigured, reason, message) + iss.SetPhase(ISBSvcPhaseFailed, message) } // MarkDeployed set the InterStepBufferService has been deployed. -func (isbsvc *InterStepBufferServiceStatus) MarkDeployed() { - isbsvc.MarkTrue(ISBSvcConditionDeployed) - isbsvc.SetPhase(ISBSvcPhaseRunning, "") +func (iss *InterStepBufferServiceStatus) MarkDeployed() { + iss.MarkTrue(ISBSvcConditionDeployed) + iss.SetPhase(ISBSvcPhaseRunning, "") } // MarkDeployFailed set the InterStepBufferService deployment failed -func (isbsvc *InterStepBufferServiceStatus) MarkDeployFailed(reason, message string) { - isbsvc.MarkFalse(ISBSvcConditionDeployed, reason, message) - isbsvc.SetPhase(ISBSvcPhaseFailed, message) +func (iss *InterStepBufferServiceStatus) MarkDeployFailed(reason, message string) { + iss.MarkFalse(ISBSvcConditionDeployed, reason, message) + iss.SetPhase(ISBSvcPhaseFailed, message) } // SetObservedGeneration sets the Status ObservedGeneration -func (isbsvc *InterStepBufferServiceStatus) SetObservedGeneration(value int64) { - isbsvc.ObservedGeneration = value +func (iss *InterStepBufferServiceStatus) SetObservedGeneration(value int64) { + iss.ObservedGeneration = value } // IsHealthy indicates whether the InterStepBufferService is healthy or not -func (isbsvc *InterStepBufferServiceStatus) IsHealthy() bool { - if 
isbsvc.Phase != ISBSvcPhaseRunning { +func (iss *InterStepBufferServiceStatus) IsHealthy() bool { + if iss.Phase != ISBSvcPhaseRunning { return false } - return isbsvc.IsReady() + return iss.IsReady() } -// MarkChildrenResourceNotHealthy marks the children resources as not healthy -func (isbsvc *InterStepBufferServiceStatus) MarkChildrenResourceNotHealthy(reason, message string) { - isbsvc.MarkFalse(ISBSvcConditionChildrenResourcesHealthy, reason, message) +// MarkChildrenResourceUnHealthy marks the children resources as not healthy +func (iss *InterStepBufferServiceStatus) MarkChildrenResourceUnHealthy(reason, message string) { + iss.MarkFalse(ISBSvcConditionChildrenResourcesHealthy, reason, message) + iss.Message = reason + ": " + message } // MarkChildrenResourceHealthy marks the children resources as healthy -func (isbsvc *InterStepBufferServiceStatus) MarkChildrenResourceHealthy(reason, message string) { - isbsvc.MarkTrueWithReason(ISBSvcConditionChildrenResourcesHealthy, reason, message) +func (iss *InterStepBufferServiceStatus) MarkChildrenResourceHealthy(reason, message string) { + iss.MarkTrueWithReason(ISBSvcConditionChildrenResourcesHealthy, reason, message) } diff --git a/pkg/apis/numaflow/v1alpha1/isbsvc_types_test.go b/pkg/apis/numaflow/v1alpha1/isbsvc_types_test.go index 6161aad1ad..22f050ad33 100644 --- a/pkg/apis/numaflow/v1alpha1/isbsvc_types_test.go +++ b/pkg/apis/numaflow/v1alpha1/isbsvc_types_test.go @@ -77,7 +77,7 @@ func Test_ISBSvcMarkStatus(t *testing.T) { assert.Equal(t, metav1.ConditionTrue, c.Status) } } - s.MarkChildrenResourceNotHealthy("reason", "message") + s.MarkChildrenResourceUnHealthy("reason", "message") for _, c := range s.Conditions { if c.Type == string(ISBSvcConditionChildrenResourcesHealthy) { assert.Equal(t, metav1.ConditionFalse, c.Status) diff --git a/pkg/apis/numaflow/v1alpha1/openapi_generated.go b/pkg/apis/numaflow/v1alpha1/openapi_generated.go index 6a3bf1406c..672c0e1621 100644 --- 
a/pkg/apis/numaflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/numaflow/v1alpha1/openapi_generated.go @@ -2095,9 +2095,8 @@ func schema_pkg_apis_numaflow_v1alpha1_InterStepBufferServiceStatus(ref common.R }, "observedGeneration": { SchemaProps: spec.SchemaProps{ - Description: "ObservedGeneration stores the generation value observed by the controller.", - Type: []string{"integer"}, - Format: "int64", + Type: []string{"integer"}, + Format: "int64", }, }, }, @@ -3396,9 +3395,8 @@ func schema_pkg_apis_numaflow_v1alpha1_PipelineStatus(ref common.ReferenceCallba }, "observedGeneration": { SchemaProps: spec.SchemaProps{ - Description: "ObservedGeneration stores the generation value observed by the controller.", - Type: []string{"integer"}, - Format: "int64", + Type: []string{"integer"}, + Format: "int64", }, }, }, @@ -4913,6 +4911,26 @@ func schema_pkg_apis_numaflow_v1alpha1_VertexStatus(ref common.ReferenceCallback SchemaProps: spec.SchemaProps{ Type: []string{"object"}, Properties: map[string]spec.Schema{ + "conditions": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Conditions are the latest available observations of a resource's current state.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Condition"), + }, + }, + }, + }, + }, "phase": { SchemaProps: spec.SchemaProps{ Default: "", @@ -4920,26 +4938,26 @@ func schema_pkg_apis_numaflow_v1alpha1_VertexStatus(ref common.ReferenceCallback Format: "", }, }, - "reason": { + "replicas": { SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: 0, + Type: []string{"integer"}, + Format: "int64", }, }, - "message": { + "selector": { SchemaProps: spec.SchemaProps{ Type: 
[]string{"string"}, Format: "", }, }, - "replicas": { + "reason": { SchemaProps: spec.SchemaProps{ - Default: 0, - Type: []string{"integer"}, - Format: "int64", + Type: []string{"string"}, + Format: "", }, }, - "selector": { + "message": { SchemaProps: spec.SchemaProps{ Type: []string{"string"}, Format: "", @@ -4950,31 +4968,10 @@ func schema_pkg_apis_numaflow_v1alpha1_VertexStatus(ref common.ReferenceCallback Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, - "conditions": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Conditions are the latest available observations of a resource's current state.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Condition"), - }, - }, - }, - }, - }, "observedGeneration": { SchemaProps: spec.SchemaProps{ - Description: "ObservedGeneration stores the generation value observed by the controller.", - Type: []string{"integer"}, - Format: "int64", + Type: []string{"integer"}, + Format: "int64", }, }, }, diff --git a/pkg/apis/numaflow/v1alpha1/pipeline_types.go b/pkg/apis/numaflow/v1alpha1/pipeline_types.go index 8fbc4c229f..a43ba5bd04 100644 --- a/pkg/apis/numaflow/v1alpha1/pipeline_types.go +++ b/pkg/apis/numaflow/v1alpha1/pipeline_types.go @@ -30,7 +30,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" ) -// +kubebuilder:validation:Enum="";Running;Succeeded;Failed;Pausing;Paused;Deleting +// +kubebuilder:validation:Enum="";Running;Failed;Pausing;Paused;Deleting type PipelinePhase string const ( @@ -59,7 +59,6 @@ const ( // +kubebuilder:resource:shortName=pl // +kubebuilder:subresource:status // +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` -// 
+kubebuilder:printcolumn:name="Message",type=string,JSONPath=`.status.message` // +kubebuilder:printcolumn:name="Vertices",type=integer,JSONPath=`.status.vertexCount` // +kubebuilder:printcolumn:name="Sources",type=integer,JSONPath=`.status.sourceCount`,priority=10 // +kubebuilder:printcolumn:name="Sinks",type=integer,JSONPath=`.status.sinkCount`,priority=10 @@ -67,6 +66,7 @@ const ( // +kubebuilder:printcolumn:name="Map UDFs",type=integer,JSONPath=`.status.mapUDFCount`,priority=10 // +kubebuilder:printcolumn:name="Reduce UDFs",type=integer,JSONPath=`.status.reduceUDFCount`,priority=10 // +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:printcolumn:name="Message",type=string,JSONPath=`.status.message` // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:openapi-gen=true type Pipeline struct { @@ -609,18 +609,17 @@ type PipelineLimits struct { } type PipelineStatus struct { - Status `json:",inline" protobuf:"bytes,1,opt,name=status"` - Phase PipelinePhase `json:"phase,omitempty" protobuf:"bytes,2,opt,name=phase,casttype=PipelinePhase"` - Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` - LastUpdated metav1.Time `json:"lastUpdated,omitempty" protobuf:"bytes,4,opt,name=lastUpdated"` - VertexCount *uint32 `json:"vertexCount,omitempty" protobuf:"varint,5,opt,name=vertexCount"` - SourceCount *uint32 `json:"sourceCount,omitempty" protobuf:"varint,6,opt,name=sourceCount"` - SinkCount *uint32 `json:"sinkCount,omitempty" protobuf:"varint,7,opt,name=sinkCount"` - UDFCount *uint32 `json:"udfCount,omitempty" protobuf:"varint,8,opt,name=udfCount"` - MapUDFCount *uint32 `json:"mapUDFCount,omitempty" protobuf:"varint,9,opt,name=mapUDFCount"` - ReduceUDFCount *uint32 `json:"reduceUDFCount,omitempty" protobuf:"varint,10,opt,name=reduceUDFCount"` - // ObservedGeneration stores the generation value observed by the controller. 
- ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,11,opt,name=observedGeneration"` + Status `json:",inline" protobuf:"bytes,1,opt,name=status"` + Phase PipelinePhase `json:"phase,omitempty" protobuf:"bytes,2,opt,name=phase,casttype=PipelinePhase"` + Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` + LastUpdated metav1.Time `json:"lastUpdated,omitempty" protobuf:"bytes,4,opt,name=lastUpdated"` + VertexCount *uint32 `json:"vertexCount,omitempty" protobuf:"varint,5,opt,name=vertexCount"` + SourceCount *uint32 `json:"sourceCount,omitempty" protobuf:"varint,6,opt,name=sourceCount"` + SinkCount *uint32 `json:"sinkCount,omitempty" protobuf:"varint,7,opt,name=sinkCount"` + UDFCount *uint32 `json:"udfCount,omitempty" protobuf:"varint,8,opt,name=udfCount"` + MapUDFCount *uint32 `json:"mapUDFCount,omitempty" protobuf:"varint,9,opt,name=mapUDFCount"` + ReduceUDFCount *uint32 `json:"reduceUDFCount,omitempty" protobuf:"varint,10,opt,name=reduceUDFCount"` + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,11,opt,name=observedGeneration"` } // SetVertexCounts sets the counts of vertices. @@ -683,17 +682,55 @@ func (pls *PipelineStatus) MarkDeployed() { pls.MarkTrue(PipelineConditionDeployed) } -// MarkPhaseRunning set the Pipeline has been running. -func (pls *PipelineStatus) MarkPhaseRunning() { - pls.SetPhase(PipelinePhaseRunning, "") -} - // MarkDeployFailed set the Pipeline deployment failed func (pls *PipelineStatus) MarkDeployFailed(reason, message string) { pls.MarkFalse(PipelineConditionDeployed, reason, message) pls.SetPhase(PipelinePhaseFailed, message) } +// MarkVerticesHealthy set the daemon service of the pipeline is healthy. +func (pls *PipelineStatus) MarkDaemonServiceHealthy() { + pls.MarkTrue(PipelineConditionDaemonServiceHealthy) +} + +// MarkDaemonServiceUnHealthy set the daemon service of the pipeline is unhealthy. 
+func (pls *PipelineStatus) MarkDaemonServiceUnHealthy(reason, message string) { + pls.MarkFalse(PipelineConditionDaemonServiceHealthy, reason, message) + pls.Message = "Degraded: " + message +} + +// MarkSideInputsManagersHealthy set the Side Inputs managers of the pipeline are healthy. +func (pls *PipelineStatus) MarkSideInputsManagersHealthy() { + pls.MarkTrue(PipelineConditionSideInputsManagersHealthy) +} + +// MarkSideInputsManagersHealthyWithReason set the Side Inputs managers of the pipeline are healthy with the given reason. +func (pls *PipelineStatus) MarkSideInputsManagersHealthyWithReason(reason, message string) { + pls.MarkTrueWithReason(PipelineConditionSideInputsManagersHealthy, reason, message) +} + +// MarkSideInputsManagersUnHealthy set the Side Inputs managers of the pipeline are unhealthy. +func (pls *PipelineStatus) MarkSideInputsManagersUnHealthy(reason, message string) { + pls.MarkFalse(PipelineConditionSideInputsManagersHealthy, reason, message) + pls.Message = "Degraded: " + message +} + +// MarkVerticesHealthy set the vertices of the pipeline are healthy. +func (pls *PipelineStatus) MarkVerticesHealthy() { + pls.MarkTrueWithReason(PipelineConditionVerticesHealthy, "Successful", "All vertices are healthy") +} + +// MarkVerticesUnHealthy set the vertices of the pipeline are unhealthy with the given reason. +func (pls *PipelineStatus) MarkVerticesUnHealthy(reason, message string) { + pls.MarkFalse(PipelineConditionVerticesHealthy, reason, message) + pls.Message = "Degraded: " + message +} + +// MarkPhaseRunning set the Pipeline has been running. +func (pls *PipelineStatus) MarkPhaseRunning() { + pls.SetPhase(PipelinePhaseRunning, "") +} + // MarkPhasePaused set the Pipeline has been paused. 
func (pls *PipelineStatus) MarkPhasePaused() { pls.SetPhase(PipelinePhasePaused, "Pipeline paused") @@ -731,16 +768,6 @@ func (pls *PipelineStatus) IsHealthy() bool { } } -// MarkServiceNotHealthy marks a service as not healthy with the given reason and message. -func (pls *PipelineStatus) MarkServiceNotHealthy(conditionType ConditionType, reason, message string) { - pls.MarkFalse(conditionType, reason, message) -} - -// MarkServiceHealthy marks a service as healthy with the given reason and message. -func (pls *PipelineStatus) MarkServiceHealthy(conditionType ConditionType, reason, message string) { - pls.MarkTrueWithReason(conditionType, reason, message) -} - // +kubebuilder:object:root=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type PipelineList struct { diff --git a/pkg/apis/numaflow/v1alpha1/pipeline_types_test.go b/pkg/apis/numaflow/v1alpha1/pipeline_types_test.go index 25b60b5bd8..0bd93e53fb 100644 --- a/pkg/apis/numaflow/v1alpha1/pipeline_types_test.go +++ b/pkg/apis/numaflow/v1alpha1/pipeline_types_test.go @@ -273,9 +273,56 @@ func Test_PipelineMarkStatus(t *testing.T) { assert.Equal(t, metav1.ConditionTrue, c.Status) } } - s.MarkServiceHealthy(PipelineConditionDaemonServiceHealthy, "test-reason", "All service healthy") - s.MarkServiceHealthy(PipelineConditionSideInputsManagersHealthy, "test-reason", "All service healthy") - s.MarkServiceHealthy(PipelineConditionVerticesHealthy, "test-reason", "All service healthy") + s.MarkDaemonServiceUnHealthy("reason", "message") + for _, c := range s.Conditions { + if c.Type == string(PipelineConditionDaemonServiceHealthy) { + assert.Equal(t, metav1.ConditionFalse, c.Status) + assert.Equal(t, "reason", c.Reason) + assert.Equal(t, "message", c.Message) + } + } + s.MarkDaemonServiceHealthy() + for _, c := range s.Conditions { + if c.Type == string(PipelineConditionDaemonServiceHealthy) { + assert.Equal(t, metav1.ConditionTrue, c.Status) + } + } + 
s.MarkSideInputsManagersHealthyWithReason("reason", "message") + for _, c := range s.Conditions { + if c.Type == string(PipelineConditionSideInputsManagersHealthy) { + assert.Equal(t, metav1.ConditionTrue, c.Status) + assert.Equal(t, "reason", c.Reason) + assert.Equal(t, "message", c.Message) + } + } + s.MarkSideInputsManagersUnHealthy("reason", "message") + for _, c := range s.Conditions { + if c.Type == string(PipelineConditionSideInputsManagersHealthy) { + assert.Equal(t, metav1.ConditionFalse, c.Status) + assert.Equal(t, "reason", c.Reason) + assert.Equal(t, "message", c.Message) + } + } + s.MarkSideInputsManagersHealthy() + for _, c := range s.Conditions { + if c.Type == string(PipelineConditionSideInputsManagersHealthy) { + assert.Equal(t, metav1.ConditionTrue, c.Status) + } + } + s.MarkVerticesUnHealthy("reason", "message") + for _, c := range s.Conditions { + if c.Type == string(PipelineConditionVerticesHealthy) { + assert.Equal(t, metav1.ConditionFalse, c.Status) + assert.Equal(t, "reason", c.Reason) + assert.Equal(t, "message", c.Message) + } + } + s.MarkVerticesHealthy() + for _, c := range s.Conditions { + if c.Type == string(PipelineConditionVerticesHealthy) { + assert.Equal(t, metav1.ConditionTrue, c.Status) + } + } assert.True(t, s.IsReady()) } diff --git a/pkg/apis/numaflow/v1alpha1/vertex_types.go b/pkg/apis/numaflow/v1alpha1/vertex_types.go index 39a4c05b18..186fa9c0bf 100644 --- a/pkg/apis/numaflow/v1alpha1/vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/vertex_types.go @@ -28,15 +28,13 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" ) -// +kubebuilder:validation:Enum="";Pending;Running;Succeeded;Failed +// +kubebuilder:validation:Enum="";Running;Failed type VertexPhase string const ( - VertexPhaseUnknown VertexPhase = "" - VertexPhasePending VertexPhase = "Pending" - VertexPhaseRunning VertexPhase = "Running" - VertexPhaseSucceeded VertexPhase = "Succeeded" - VertexPhaseFailed VertexPhase = "Failed" + VertexPhaseUnknown VertexPhase = "" + 
VertexPhaseRunning VertexPhase = "Running" + VertexPhaseFailed VertexPhase = "Failed" // VertexConditionPodsHealthy has the status True when all the vertex pods are healthy. VertexConditionPodsHealthy ConditionType = "PodsHealthy" @@ -57,10 +55,11 @@ const ( // +kubebuilder:subresource:status // +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector // +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` -// +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.reason` -// +kubebuilder:printcolumn:name="Message",type=string,JSONPath=`.status.message` // +kubebuilder:printcolumn:name="Desired",type=string,JSONPath=`.spec.replicas` // +kubebuilder:printcolumn:name="Current",type=string,JSONPath=`.status.replicas` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.reason` +// +kubebuilder:printcolumn:name="Message",type=string,JSONPath=`.status.message` // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:openapi-gen=true type Vertex struct { @@ -822,15 +821,14 @@ func (v VertexSpec) getType() containerSupplier { } type VertexStatus struct { - Phase VertexPhase `json:"phase" protobuf:"bytes,1,opt,name=phase,casttype=VertexPhase"` - Reason string `json:"reason,omitempty" protobuf:"bytes,6,opt,name=reason"` - Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` - Replicas uint32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"` - Selector string `json:"selector,omitempty" protobuf:"bytes,5,opt,name=selector"` - LastScaledAt metav1.Time `json:"lastScaledAt,omitempty" protobuf:"bytes,4,opt,name=lastScaledAt"` - Status `json:",inline" protobuf:"bytes,7,opt,name=status"` - // ObservedGeneration stores the generation value observed by the controller. 
- ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,8,opt,name=observedGeneration"` + Status `json:",inline" protobuf:"bytes,1,opt,name=status"` + Phase VertexPhase `json:"phase" protobuf:"bytes,2,opt,name=phase,casttype=VertexPhase"` + Replicas uint32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"` + Selector string `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` + LastScaledAt metav1.Time `json:"lastScaledAt,omitempty" protobuf:"bytes,7,opt,name=lastScaledAt"` + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,8,opt,name=observedGeneration"` } func (vs *VertexStatus) MarkPhase(phase VertexPhase, reason, message string) { @@ -839,10 +837,12 @@ func (vs *VertexStatus) MarkPhase(phase VertexPhase, reason, message string) { vs.Message = message } +// MarkPhaseFailed marks the phase as failed with the given reason and message. func (vs *VertexStatus) MarkPhaseFailed(reason, message string) { vs.MarkPhase(VertexPhaseFailed, reason, message) } +// MarkPhaseRunning marks the phase as running. func (vs *VertexStatus) MarkPhaseRunning() { vs.MarkPhase(VertexPhaseRunning, "", "") } @@ -850,6 +850,8 @@ func (vs *VertexStatus) MarkPhaseRunning() { // MarkPodNotHealthy marks the pod not healthy with the given reason and message. func (vs *VertexStatus) MarkPodNotHealthy(reason, message string) { vs.MarkFalse(VertexConditionPodsHealthy, reason, message) + vs.Reason = reason + vs.Message = "Degraded: " + message } // MarkPodHealthy marks the pod as healthy with the given reason and message. 
diff --git a/pkg/apis/numaflow/v1alpha1/vertex_types_test.go b/pkg/apis/numaflow/v1alpha1/vertex_types_test.go index b39c8e7ab4..6610908836 100644 --- a/pkg/apis/numaflow/v1alpha1/vertex_types_test.go +++ b/pkg/apis/numaflow/v1alpha1/vertex_types_test.go @@ -490,8 +490,8 @@ func Test_getType(t *testing.T) { func TestVertexMarkPhase(t *testing.T) { s := VertexStatus{} - s.MarkPhase(VertexPhasePending, "reason", "message") - assert.Equal(t, VertexPhasePending, s.Phase) + s.MarkPhase(VertexPhaseRunning, "reason", "message") + assert.Equal(t, VertexPhaseRunning, s.Phase) assert.Equal(t, "reason", s.Reason) assert.Equal(t, "message", s.Message) } diff --git a/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go index 1c4e3e0d8f..bbf9bc3867 100644 --- a/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go @@ -2520,8 +2520,8 @@ func (in *VertexSpec) DeepCopy() *VertexSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *VertexStatus) DeepCopyInto(out *VertexStatus) { *out = *in - in.LastScaledAt.DeepCopyInto(&out.LastScaledAt) in.Status.DeepCopyInto(&out.Status) + in.LastScaledAt.DeepCopyInto(&out.LastScaledAt) return } diff --git a/pkg/reconciler/cmd/start.go b/pkg/reconciler/cmd/start.go index c5b480affe..1b65402431 100644 --- a/pkg/reconciler/cmd/start.go +++ b/pkg/reconciler/cmd/start.go @@ -18,7 +18,6 @@ package cmd import ( "context" - "reflect" "time" "go.uber.org/zap" @@ -184,15 +183,6 @@ func Start(namespaced bool, managedNamespace string) { handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &dfv1.Pipeline{}, handler.OnlyControllerOwner()), predicate.And( predicate.ResourceVersionChangedPredicate{}, - predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - if e.ObjectOld == nil || e.ObjectNew == nil { - return true - } - old, _ := e.ObjectOld.(*dfv1.Vertex) - new, _ := e.ObjectNew.(*dfv1.Vertex) - return !reflect.DeepEqual(new.Spec.WithOutReplicas(), old.Spec.WithOutReplicas()) - }}, )); err != nil { logger.Fatalw("Unable to watch Vertices", zap.Error(err)) } diff --git a/pkg/reconciler/isbsvc/installer/installer_test.go b/pkg/reconciler/isbsvc/installer/installer_test.go index 6f042de818..ce6e5dc124 100644 --- a/pkg/reconciler/isbsvc/installer/installer_test.go +++ b/pkg/reconciler/isbsvc/installer/installer_test.go @@ -211,7 +211,7 @@ func TestInstall(t *testing.T) { testObj.Name = "fake-isb" err := Install(ctx, testObj, cl, kubeClient, fakeConfig, zaptest.NewLogger(t).Sugar(), record.NewFakeRecorder(64)) assert.NoError(t, err) - testObj.Status.MarkChildrenResourceNotHealthy("reason", "message") + testObj.Status.MarkChildrenResourceUnHealthy("reason", "message") assert.False(t, testObj.Status.IsReady()) assert.False(t, testObj.Status.IsHealthy()) diff --git a/pkg/reconciler/isbsvc/installer/jetstream.go b/pkg/reconciler/isbsvc/installer/jetstream.go index e0d618e9ac..d887f79e38 100644 --- 
a/pkg/reconciler/isbsvc/installer/jetstream.go +++ b/pkg/reconciler/isbsvc/installer/jetstream.go @@ -561,18 +561,18 @@ func (r *jetStreamInstaller) CheckChildrenResourceStatus(ctx context.Context) er Name: generateJetStreamStatefulSetName(r.isbSvc), }, &isbStatefulSet); err != nil { if apierrors.IsNotFound(err) { - r.isbSvc.Status.MarkChildrenResourceNotHealthy("GetStatefulSetFailed", + r.isbSvc.Status.MarkChildrenResourceUnHealthy("GetStatefulSetFailed", "StatefulSet not found, might be still under creation") return nil } - r.isbSvc.Status.MarkChildrenResourceNotHealthy("GetStatefulSetFailed", err.Error()) + r.isbSvc.Status.MarkChildrenResourceUnHealthy("GetStatefulSetFailed", err.Error()) return err } // calculate the status of the InterStepBufferService by statefulset status and update the status of isbSvc - if msg, reason, status := getStatefulSetStatus(&isbStatefulSet); status { + if status, reason, msg := reconciler.CheckStatefulSetStatus(&isbStatefulSet); status { r.isbSvc.Status.MarkChildrenResourceHealthy(reason, msg) } else { - r.isbSvc.Status.MarkChildrenResourceNotHealthy(reason, msg) + r.isbSvc.Status.MarkChildrenResourceUnHealthy(reason, msg) } return nil } diff --git a/pkg/reconciler/isbsvc/installer/native_redis.go b/pkg/reconciler/isbsvc/installer/native_redis.go index f3f5d2cd62..495d24b03b 100644 --- a/pkg/reconciler/isbsvc/installer/native_redis.go +++ b/pkg/reconciler/isbsvc/installer/native_redis.go @@ -628,18 +628,18 @@ func (r *redisInstaller) CheckChildrenResourceStatus(ctx context.Context) error Name: generateRedisStatefulSetName(r.isbSvc), }, &isbStatefulSet); err != nil { if apierrors.IsNotFound(err) { - r.isbSvc.Status.MarkChildrenResourceNotHealthy("GetStatefulSetFailed", + r.isbSvc.Status.MarkChildrenResourceUnHealthy("GetStatefulSetFailed", "StatefulSet not found, might be still under creation") return nil } - r.isbSvc.Status.MarkChildrenResourceNotHealthy("GetStatefulSetFailed", err.Error()) + 
r.isbSvc.Status.MarkChildrenResourceUnHealthy("GetStatefulSetFailed", err.Error()) return err } // calculate the status of the InterStepBufferService by statefulset status and update the status of isbSvc - if msg, reason, status := getStatefulSetStatus(&isbStatefulSet); status { + if status, reason, msg := reconciler.CheckStatefulSetStatus(&isbStatefulSet); status { r.isbSvc.Status.MarkChildrenResourceHealthy(reason, msg) } else { - r.isbSvc.Status.MarkChildrenResourceNotHealthy(reason, msg) + r.isbSvc.Status.MarkChildrenResourceUnHealthy(reason, msg) } return nil } diff --git a/pkg/reconciler/isbsvc/installer/watcher.go b/pkg/reconciler/isbsvc/installer/watcher.go deleted file mode 100644 index 88e06c11fa..0000000000 --- a/pkg/reconciler/isbsvc/installer/watcher.go +++ /dev/null @@ -1,36 +0,0 @@ -package installer - -import ( - "fmt" - - appv1 "k8s.io/api/apps/v1" -) - -// getStatefulSetStatus returns a message describing statefulset status, and a bool value indicating if the status is considered done. 
-// Borrowed at kubernetes/kubectl/rollout_status.go https://github.com/kubernetes/kubernetes/blob/cea1d4e20b4a7886d8ff65f34c6d4f95efcb4742/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollout_status.go#L130 -func getStatefulSetStatus(sts *appv1.StatefulSet) (string, string, bool) { - if sts.Status.ObservedGeneration == 0 || sts.Generation > sts.Status.ObservedGeneration { - return "Waiting for statefulset spec update to be observed...\n", "Progressing", false - } - if sts.Status.UpdateRevision != sts.Status.CurrentRevision { - return fmt.Sprintf("waiting for statefulset rolling update to complete %d pods at revision %s...\n", - sts.Status.UpdatedReplicas, sts.Status.UpdateRevision), "Progressing", false - } - if sts.Spec.Replicas != nil && sts.Status.ReadyReplicas < *sts.Spec.Replicas { - return fmt.Sprintf("Waiting for %d pods to be ready...\n", *sts.Spec.Replicas-sts.Status.ReadyReplicas), "Unavailable", false - } - if sts.Spec.UpdateStrategy.Type == appv1.RollingUpdateStatefulSetStrategyType && sts.Spec.UpdateStrategy.RollingUpdate != nil { - if sts.Spec.Replicas != nil && sts.Spec.UpdateStrategy.RollingUpdate.Partition != nil { - if sts.Status.UpdatedReplicas < (*sts.Spec.Replicas - *sts.Spec.UpdateStrategy.RollingUpdate.Partition) { - return fmt.Sprintf( - "Waiting for partitioned roll out to finish: %d out of %d new pods have been updated...\n", - sts.Status.UpdatedReplicas, *sts.Spec.Replicas-*sts.Spec.UpdateStrategy.RollingUpdate.Partition), "Progressing", false - } - } - return fmt.Sprintf("partitioned roll out complete: %d new pods have been updated...\n", - sts.Status.UpdatedReplicas), "Healthy", true - } - return fmt.Sprintf( - "statefulset rolling update complete %d pods at revision %s...\n", - sts.Status.CurrentReplicas, sts.Status.CurrentRevision), "Healthy", true -} diff --git a/pkg/reconciler/isbsvc/installer/watcher_test.go b/pkg/reconciler/isbsvc/installer/watcher_test.go deleted file mode 100644 index 79c912dc40..0000000000 --- 
a/pkg/reconciler/isbsvc/installer/watcher_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package installer - -import ( - "testing" - - "github.com/stretchr/testify/assert" - appsv1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -var ( - statefulSet = &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-statefulset", - Namespace: "default", - }, - Status: appsv1.StatefulSetStatus{ - AvailableReplicas: 3, - CurrentReplicas: 3, - CurrentRevision: "isbsvc-default-js-597b7f74d7", - ObservedGeneration: 1, - ReadyReplicas: 3, - Replicas: 3, - UpdateRevision: "isbsvc-default-js-597b7f74d7", - UpdatedReplicas: 3, - }, - } -) - -func TestGetStatefulSetStatus(t *testing.T) { - t.Run("Test statefulset status as true", func(t *testing.T) { - testSts := statefulSet.DeepCopy() - msg, reason, status := getStatefulSetStatus(testSts) - assert.Equal(t, "Healthy", reason) - assert.True(t, status) - assert.Equal(t, "statefulset rolling update complete 3 pods at revision isbsvc-default-js-597b7f74d7...\n", msg) - }) - - t.Run("Test statefulset status as false", func(t *testing.T) { - testSts := statefulSet.DeepCopy() - testSts.Status.UpdateRevision = "isbsvc-default-js-597b7f73a1" - msg, reason, status := getStatefulSetStatus(testSts) - assert.Equal(t, "Progressing", reason) - assert.False(t, status) - assert.Equal(t, "waiting for statefulset rolling update to complete 3 pods at revision isbsvc-default-js-597b7f73a1...\n", msg) - }) - - t.Run("Test statefulset with ObservedGeneration as zero", func(t *testing.T) { - testSts := statefulSet.DeepCopy() - testSts.Status.ObservedGeneration = 0 - msg, reason, status := getStatefulSetStatus(testSts) - assert.Equal(t, "Progressing", reason) - assert.False(t, status) - assert.Equal(t, "Waiting for statefulset spec update to be observed...\n", msg) - }) -} diff --git a/pkg/reconciler/pipeline/controller.go b/pkg/reconciler/pipeline/controller.go index 2d0920349f..cffd45b93c 100644 --- 
a/pkg/reconciler/pipeline/controller.go +++ b/pkg/reconciler/pipeline/controller.go @@ -144,16 +144,9 @@ func (r *pipelineReconciler) reconcile(ctx context.Context, pl *dfv1.Pipeline) ( }() pl.Status.SetObservedGeneration(pl.Generation) - // New, or reconciliation failed pipeline - if pl.Status.Phase == dfv1.PipelinePhaseUnknown || pl.Status.Phase == dfv1.PipelinePhaseFailed { - result, err := r.reconcileNonLifecycleChanges(ctx, pl) - if err != nil { - r.recorder.Eventf(pl, corev1.EventTypeWarning, "ReconcilePipelineFailed", "Failed to reconcile pipeline: %v", err.Error()) - } - return result, err - } - if oldPhase := pl.Status.Phase; oldPhase != pl.Spec.Lifecycle.GetDesiredPhase() { + if oldPhase := pl.Status.Phase; pl.Spec.Lifecycle.GetDesiredPhase() == dfv1.PipelinePhasePaused || + oldPhase == dfv1.PipelinePhasePaused || oldPhase == dfv1.PipelinePhasePausing { requeue, err := r.updateDesiredState(ctx, pl) if err != nil { logMsg := fmt.Sprintf("Updated desired pipeline phase failed: %v", zap.Error(err)) @@ -169,9 +162,10 @@ func (r *pipelineReconciler) reconcile(ctx context.Context, pl *dfv1.Pipeline) ( return ctrl.Result{RequeueAfter: dfv1.DefaultRequeueAfter}, nil } return ctrl.Result{}, nil + } - // Regular pipeline update + // Regular pipeline change result, err := r.reconcileNonLifecycleChanges(ctx, pl) if err != nil { r.recorder.Eventf(pl, corev1.EventTypeWarning, "ReconcilePipelineFailed", "Failed to reconcile pipeline: %v", err.Error()) @@ -352,8 +346,8 @@ func (r *pipelineReconciler) reconcileNonLifecycleChanges(ctx context.Context, p pl.Status.MarkDeployed() pl.Status.SetPhase(pl.Spec.Lifecycle.GetDesiredPhase(), "") - if err := checkChildrenResourceStatus(ctx, r.client, pl); err != nil { - return ctrl.Result{}, fmt.Errorf("failed to check children resource status, %w", err) + if err := r.checkChildrenResourceStatus(ctx, pl); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to check pipeline children resource status, %w", err) } return 
ctrl.Result{}, nil } @@ -946,41 +940,50 @@ func (r *pipelineReconciler) safeToDelete(ctx context.Context, pl *dfv1.Pipeline return daemonClient.IsDrained(ctx, pl.Name) } -func checkChildrenResourceStatus(ctx context.Context, c client.Client, pipeline *dfv1.Pipeline) error { +// checkChildrenResourceStatus checks the status of the children resources of the pipeline +func (r *pipelineReconciler) checkChildrenResourceStatus(ctx context.Context, pipeline *dfv1.Pipeline) error { + defer func() { + for _, c := range pipeline.Status.Conditions { + if c.Status != metav1.ConditionTrue { + pipeline.Status.SetPhase(pipeline.Spec.Lifecycle.GetDesiredPhase(), "Degraded: "+c.Message) + } + } + }() + // get the daemon deployment and update the status of it to the pipeline var daemonDeployment appv1.Deployment - if err := c.Get(ctx, client.ObjectKey{Namespace: pipeline.GetNamespace(), Name: pipeline.GetDaemonDeploymentName()}, + if err := r.client.Get(ctx, client.ObjectKey{Namespace: pipeline.GetNamespace(), Name: pipeline.GetDaemonDeploymentName()}, &daemonDeployment); err != nil { if apierrors.IsNotFound(err) { - pipeline.Status.MarkServiceNotHealthy(dfv1.PipelineConditionDaemonServiceHealthy, + pipeline.Status.MarkDaemonServiceUnHealthy( "GetDaemonServiceFailed", "Deployment not found, might be still under creation") return nil } - pipeline.Status.MarkServiceNotHealthy(dfv1.PipelineConditionDaemonServiceHealthy, "GetDaemonServiceFailed", err.Error()) + pipeline.Status.MarkDaemonServiceUnHealthy("GetDaemonServiceFailed", err.Error()) return err } - if msg, reason, status := getDeploymentStatus(&daemonDeployment); status { - pipeline.Status.MarkServiceHealthy(dfv1.PipelineConditionDaemonServiceHealthy, reason, msg) + if status, reason, msg := reconciler.CheckDeploymentStatus(&daemonDeployment); status { + pipeline.Status.MarkDaemonServiceHealthy() } else { - pipeline.Status.MarkServiceNotHealthy(dfv1.PipelineConditionDaemonServiceHealthy, reason, msg) + 
pipeline.Status.MarkDaemonServiceUnHealthy(reason, msg) } // get the side input deployments and update the status of them to the pipeline if len(pipeline.Spec.SideInputs) == 0 { - pipeline.Status.MarkServiceHealthy(dfv1.PipelineConditionSideInputsManagersHealthy, + pipeline.Status.MarkSideInputsManagersHealthyWithReason( "NoSideInputs", "No Side Inputs attached to the pipeline") } else { var sideInputs appv1.DeploymentList selector, _ := labels.Parse(dfv1.KeyPipelineName + "=" + pipeline.Name + "," + dfv1.KeyComponent + "=" + dfv1.ComponentSideInputManager) - if err := c.List(ctx, &sideInputs, &client.ListOptions{Namespace: pipeline.Namespace, LabelSelector: selector}); err != nil { - pipeline.Status.MarkServiceNotHealthy(dfv1.PipelineConditionSideInputsManagersHealthy, "ListSideInputsManagersFailed", err.Error()) + if err := r.client.List(ctx, &sideInputs, &client.ListOptions{Namespace: pipeline.Namespace, LabelSelector: selector}); err != nil { + pipeline.Status.MarkSideInputsManagersUnHealthy("ListSideInputsManagersFailed", err.Error()) return err } for _, sideInput := range sideInputs.Items { - if msg, reason, status := getDeploymentStatus(&sideInput); status { - pipeline.Status.MarkServiceHealthy(dfv1.PipelineConditionSideInputsManagersHealthy, reason, msg) + if status, reason, msg := reconciler.CheckDeploymentStatus(&sideInput); status { + pipeline.Status.MarkSideInputsManagersHealthy() } else { - pipeline.Status.MarkServiceNotHealthy(dfv1.PipelineConditionSideInputsManagersHealthy, reason, msg) + pipeline.Status.MarkSideInputsManagersUnHealthy(reason, msg) break } } @@ -989,15 +992,15 @@ func checkChildrenResourceStatus(ctx context.Context, c client.Client, pipeline // calculate the status of the vertices and update the status of them to the pipeline var vertices dfv1.VertexList selector, _ := labels.Parse(dfv1.KeyPipelineName + "=" + pipeline.GetName() + "," + dfv1.KeyComponent + "=" + dfv1.ComponentVertex) - if err := c.List(ctx, &vertices, 
&client.ListOptions{Namespace: pipeline.Namespace, LabelSelector: selector}); err != nil { - pipeline.Status.MarkServiceNotHealthy(dfv1.PipelineConditionVerticesHealthy, "ListVerticesFailed", err.Error()) + if err := r.client.List(ctx, &vertices, &client.ListOptions{Namespace: pipeline.Namespace, LabelSelector: selector}); err != nil { + pipeline.Status.MarkVerticesUnHealthy("ListVerticesFailed", err.Error()) return err } - status, reason := getVertexStatus(&vertices) + status, reason, message := reconciler.CheckVertexStatus(&vertices) if status { - pipeline.Status.MarkServiceHealthy(dfv1.PipelineConditionVerticesHealthy, reason, "All Vertices are healthy") + pipeline.Status.MarkVerticesHealthy() } else { - pipeline.Status.MarkServiceNotHealthy(dfv1.PipelineConditionVerticesHealthy, reason, "Some Vertices are not healthy") + pipeline.Status.MarkVerticesUnHealthy(reason, "Some Vertices are unhealthy: "+message) } return nil diff --git a/pkg/reconciler/pipeline/controller_test.go b/pkg/reconciler/pipeline/controller_test.go index 4fdf4236c6..3e65502147 100644 --- a/pkg/reconciler/pipeline/controller_test.go +++ b/pkg/reconciler/pipeline/controller_test.go @@ -932,7 +932,7 @@ func Test_checkChildrenResourceStatus(t *testing.T) { testObj := testPipelineWithSideinput.DeepCopy() _, err = r.reconcile(ctx, testObj) assert.NoError(t, err) - err = checkChildrenResourceStatus(ctx, cl, testObj) + err = r.checkChildrenResourceStatus(ctx, testObj) assert.NoError(t, err) for _, c := range testObj.Status.Conditions { if c.Type == string(dfv1.PipelineConditionDaemonServiceHealthy) { diff --git a/pkg/reconciler/pipeline/watcher.go b/pkg/reconciler/pipeline/watcher.go deleted file mode 100644 index 2b65987dc4..0000000000 --- a/pkg/reconciler/pipeline/watcher.go +++ /dev/null @@ -1,62 +0,0 @@ -package pipeline - -import ( - "fmt" - - appv1 "k8s.io/api/apps/v1" - - dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" -) - -// getVertexStatus will calculate the status of the 
vertices and return the status and reason -func getVertexStatus(vertices *dfv1.VertexList) (bool, string) { - for _, vertex := range vertices.Items { - if vertex.Status.ObservedGeneration == 0 || vertex.Generation > vertex.Status.ObservedGeneration { - return false, "Progressing" - } - if !vertex.Status.IsHealthy() { - return false, "Unavailable" - } - } - return true, "Healthy" -} - -// getDeploymentStatus returns a message describing deployment status, and message with reason where bool value -// indicating if the status is considered done. -// Borrowed at kubernetes/kubectl/rollout_status.go https://github.com/kubernetes/kubernetes/blob/cea1d4e20b4a7886d8ff65f34c6d4f95efcb4742/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollout_status.go#L59 -func getDeploymentStatus(deployment *appv1.Deployment) (string, string, bool) { - if deployment.Generation <= deployment.Status.ObservedGeneration { - cond := getDeploymentCondition(deployment.Status, appv1.DeploymentProgressing) - if cond != nil && cond.Reason == "ProgressDeadlineExceeded" { - return fmt.Sprintf("deployment %q exceeded its progress deadline", deployment.Name), "ProgressDeadlineExceeded", false - } - if deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas { - return fmt.Sprintf( - "Waiting for deployment %q rollout to finish: %d out of %d new replicas have been updated...\n", - deployment.Name, deployment.Status.UpdatedReplicas, *deployment.Spec.Replicas), "Progressing", false - } - if deployment.Status.Replicas > deployment.Status.UpdatedReplicas { - return fmt.Sprintf( - "Waiting for deployment %q rollout to finish: %d old replicas are pending termination...\n", - deployment.Name, deployment.Status.Replicas-deployment.Status.UpdatedReplicas), "Progressing", false - } - if deployment.Status.AvailableReplicas < deployment.Status.UpdatedReplicas { - return fmt.Sprintf( - "Waiting for deployment %q rollout to finish: %d of %d updated replicas are 
available...\n", - deployment.Name, deployment.Status.AvailableReplicas, deployment.Status.UpdatedReplicas), "Progressing", false - } - return fmt.Sprintf("deployment %q successfully rolled out\n", deployment.Name), "Healthy", true - } - return "Waiting for deployment spec update to be observed...", "Progressing", false -} - -// GetDeploymentCondition returns the condition with the provided type. -func getDeploymentCondition(status appv1.DeploymentStatus, condType appv1.DeploymentConditionType) *appv1.DeploymentCondition { - for i := range status.Conditions { - c := status.Conditions[i] - if c.Type == condType { - return &c - } - } - return nil -} diff --git a/pkg/reconciler/pipeline/watcher_test.go b/pkg/reconciler/pipeline/watcher_test.go deleted file mode 100644 index 0cd01a5024..0000000000 --- a/pkg/reconciler/pipeline/watcher_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package pipeline - -import ( - "testing" - - "github.com/stretchr/testify/assert" - appv1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" -) - -var ( - replicas int32 = 1 - deployment = appv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "default", - }, - Spec: appv1.DeploymentSpec{ - Replicas: &replicas, - }, - Status: appv1.DeploymentStatus{ - ObservedGeneration: 1, - UpdatedReplicas: 1, - Replicas: 1, - AvailableReplicas: 1, - }, - } -) - -func TestGetDeploymentStatus(t *testing.T) { - t.Run("Test Deployment status as true", func(t *testing.T) { - testDeployment := deployment.DeepCopy() - message, reason, done := getDeploymentStatus(testDeployment) - assert.Equal(t, "Healthy", reason) - assert.True(t, done) - assert.Equal(t, "deployment \"test-deployment\" successfully rolled out\n", message) - }) - - t.Run("Test Deployment status as false", func(t *testing.T) { - testDeployment := deployment.DeepCopy() - testDeployment.Status.ObservedGeneration = 0 - 
testDeployment.Status.UpdatedReplicas = 0 - message, reason, done := getDeploymentStatus(testDeployment) - assert.Equal(t, "Progressing", reason) - assert.False(t, done) - assert.Equal(t, "Waiting for deployment \"test-deployment\" rollout to finish: 0 out of 1 new replicas have been updated...\n", message) - }) - - t.Run("Test deployment status as false while updating replica", func(t *testing.T) { - testDeployment := deployment.DeepCopy() - testDeployment.Status.UpdatedReplicas = 1 - testDeployment.Status.Replicas = 2 - message, reason, done := getDeploymentStatus(testDeployment) - assert.Equal(t, "Progressing", reason) - assert.False(t, done) - assert.Equal(t, "Waiting for deployment \"test-deployment\" rollout to finish: 1 old replicas are pending termination...\n", message) - }) -} - -func TestGetVertexStatus(t *testing.T) { - t.Run("Test Vertex status as true", func(t *testing.T) { - vertices := dfv1.VertexList{ - Items: []dfv1.Vertex{ - { - ObjectMeta: metav1.ObjectMeta{ - Generation: 1, - }, - Status: dfv1.VertexStatus{ - Phase: "Running", - ObservedGeneration: 1, - }, - }, - }, - } - vertices.Items[0].Status.Conditions = []metav1.Condition{ - { - Type: string(dfv1.VertexConditionPodsHealthy), - Status: metav1.ConditionTrue, - }, - } - status, reason := getVertexStatus(&vertices) - assert.True(t, status) - assert.Equal(t, "Healthy", reason) - }) - - t.Run("Test Vertex status as false when ObservedGeneration is not matching", func(t *testing.T) { - vertices := dfv1.VertexList{ - Items: []dfv1.Vertex{ - { - ObjectMeta: metav1.ObjectMeta{ - Generation: 2, - }, - Status: dfv1.VertexStatus{ - Phase: "Running", - ObservedGeneration: 1, - }, - }, - }, - } - vertices.Items[0].Status.Conditions = []metav1.Condition{ - { - Type: string(dfv1.VertexConditionPodsHealthy), - Status: metav1.ConditionTrue, - }, - } - status, reason := getVertexStatus(&vertices) - assert.False(t, status) - assert.Equal(t, "Progressing", reason) - }) - - t.Run("Test Vertex status as false", 
func(t *testing.T) { - vertices := dfv1.VertexList{ - Items: []dfv1.Vertex{ - { - Status: dfv1.VertexStatus{ - Phase: "Pending", - }, - }, - }, - } - vertices.Items[0].Status.Conditions = []metav1.Condition{ - { - Type: string(dfv1.VertexConditionPodsHealthy), - Status: metav1.ConditionTrue, - }, - } - status, reason := getVertexStatus(&vertices) - assert.False(t, status) - assert.Equal(t, "Progressing", reason) - }) -} diff --git a/pkg/reconciler/util.go b/pkg/reconciler/util.go new file mode 100644 index 0000000000..a9d58871b4 --- /dev/null +++ b/pkg/reconciler/util.go @@ -0,0 +1,135 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconciler + +import ( + "fmt" + + appv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + + dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" +) + +// CheckVertexPodsStatus checks the status by iterating over pods objects +func CheckVertexPodsStatus(vertexPods *corev1.PodList) (healthy bool, reason string, message string) { + // TODO: Need to revisit later. 
+ if len(vertexPods.Items) == 0 { + return true, "NoPodsFound", "No Pods found" + } else { + for _, pod := range vertexPods.Items { + if podHealthy, msg := isPodHealthy(&pod); !podHealthy { + message = fmt.Sprintf("Pod %s is unhealthy", pod.Name) + reason = "Pod" + msg + healthy = false + return + } + } + } + return true, "Running", "All vertex pods are healthy" +} + +func isPodHealthy(pod *corev1.Pod) (healthy bool, reason string) { + for _, c := range pod.Status.ContainerStatuses { + if c.State.Waiting != nil && c.State.Waiting.Reason == "CrashLoopBackOff" { + return false, c.State.Waiting.Reason + } + } + return true, "" +} + +// CheckVertexStatus will calculate the status of the vertices and return the status and reason +func CheckVertexStatus(vertices *dfv1.VertexList) (healthy bool, reason string, message string) { + for _, vertex := range vertices.Items { + if vertex.Status.ObservedGeneration == 0 || vertex.Generation > vertex.Status.ObservedGeneration { + return false, "Progressing", `Vertex "` + vertex.Spec.Name + `" Waiting for reconciliation` + } + if !vertex.Status.IsHealthy() { + return false, "Unavailable", `Vertex "` + vertex.Spec.Name + `" is not healthy` + } + } + return true, "Healthy", "All vertices are healthy" +} + +// CheckDeploymentStatus returns a message describing deployment status, and message with reason where bool value +// indicating if the status is considered done. 
+// Borrowed at kubernetes/kubectl/rollout_status.go https://github.com/kubernetes/kubernetes/blob/cea1d4e20b4a7886d8ff65f34c6d4f95efcb4742/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollout_status.go#L59 +func CheckDeploymentStatus(deployment *appv1.Deployment) (done bool, reason string, message string) { + if deployment.Generation <= deployment.Status.ObservedGeneration { + cond := getDeploymentCondition(deployment.Status, appv1.DeploymentProgressing) + if cond != nil && cond.Reason == "ProgressDeadlineExceeded" { + return false, "ProgressDeadlineExceeded", fmt.Sprintf("deployment %q exceeded its progress deadline", deployment.Name) + } + if deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas { + return false, "Progressing", fmt.Sprintf( + "Waiting for deployment %q rollout to finish: %d out of %d new replicas have been updated...", + deployment.Name, deployment.Status.UpdatedReplicas, *deployment.Spec.Replicas) + } + if deployment.Status.Replicas > deployment.Status.UpdatedReplicas { + return false, "Progressing", fmt.Sprintf( + "Waiting for deployment %q rollout to finish: %d old replicas are pending termination...", + deployment.Name, deployment.Status.Replicas-deployment.Status.UpdatedReplicas) + } + if deployment.Status.AvailableReplicas < deployment.Status.UpdatedReplicas { + return false, "Progressing", fmt.Sprintf( + "Waiting for deployment %q rollout to finish: %d of %d updated replicas are available...", + deployment.Name, deployment.Status.AvailableReplicas, deployment.Status.UpdatedReplicas) + } + return true, "Healthy", fmt.Sprintf("deployment %q successfully rolled out", deployment.Name) + } + return false, "Progressing", "Waiting for deployment spec update to be observed..." +} + +// GetDeploymentCondition returns the condition with the provided type. 
+func getDeploymentCondition(status appv1.DeploymentStatus, condType appv1.DeploymentConditionType) *appv1.DeploymentCondition { + for i := range status.Conditions { + c := status.Conditions[i] + if c.Type == condType { + return &c + } + } + return nil +} + +// CheckStatefulSetStatus returns a message describing statefulset status, and a bool value indicating if the status is considered done. +// Borrowed at kubernetes/kubectl/rollout_status.go https://github.com/kubernetes/kubernetes/blob/cea1d4e20b4a7886d8ff65f34c6d4f95efcb4742/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollout_status.go#L130 +func CheckStatefulSetStatus(sts *appv1.StatefulSet) (done bool, reason string, message string) { + if sts.Status.ObservedGeneration == 0 || sts.Generation > sts.Status.ObservedGeneration { + return false, "Progressing", "Waiting for statefulset spec update to be observed..." + } + if sts.Status.UpdateRevision != sts.Status.CurrentRevision { + return false, "Progressing", fmt.Sprintf("waiting for statefulset rolling update to complete %d pods at revision %s...", + sts.Status.UpdatedReplicas, sts.Status.UpdateRevision) + } + if sts.Spec.Replicas != nil && sts.Status.ReadyReplicas < *sts.Spec.Replicas { + return false, "Unavailable", fmt.Sprintf("Waiting for %d pods to be ready...\n", *sts.Spec.Replicas-sts.Status.ReadyReplicas) + } + if sts.Spec.UpdateStrategy.Type == appv1.RollingUpdateStatefulSetStrategyType && sts.Spec.UpdateStrategy.RollingUpdate != nil { + if sts.Spec.Replicas != nil && sts.Spec.UpdateStrategy.RollingUpdate.Partition != nil { + if sts.Status.UpdatedReplicas < (*sts.Spec.Replicas - *sts.Spec.UpdateStrategy.RollingUpdate.Partition) { + return false, "Progressing", fmt.Sprintf( + "Waiting for partitioned roll out to finish: %d out of %d new pods have been updated...\n", + sts.Status.UpdatedReplicas, *sts.Spec.Replicas-*sts.Spec.UpdateStrategy.RollingUpdate.Partition) + } + } + return true, "Healthy", fmt.Sprintf("partitioned roll out complete: %d new 
pods have been updated...\n", + sts.Status.UpdatedReplicas) + } + return true, "Healthy", fmt.Sprintf( + "statefulset rolling update complete %d pods at revision %s...\n", + sts.Status.CurrentReplicas, sts.Status.CurrentRevision) +} diff --git a/pkg/reconciler/util_test.go b/pkg/reconciler/util_test.go new file mode 100644 index 0000000000..36fbbfa5c5 --- /dev/null +++ b/pkg/reconciler/util_test.go @@ -0,0 +1,256 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconciler + +import ( + "testing" + + "github.com/stretchr/testify/assert" + appv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" +) + +func TestCheckVertexPodsStatus(t *testing.T) { + t.Run("Test Vertex status as true", func(t *testing.T) { + pods := corev1.PodList{Items: []corev1.Pod{ + {ObjectMeta: metav1.ObjectMeta{Name: "test-pod"}, Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{ + {State: corev1.ContainerState{Waiting: &corev1.ContainerStateWaiting{Reason: "Running"}}}, + }}, + }}, + } + done, reason, message := CheckVertexPodsStatus(&pods) + assert.Equal(t, "All vertex pods are healthy", message) + assert.Equal(t, "Running", reason) + assert.True(t, done) + }) + + t.Run("Test Vertex status as false", func(t *testing.T) { + pods := corev1.PodList{ + Items: []corev1.Pod{ + {ObjectMeta: metav1.ObjectMeta{Name: "test-pod"}, 
Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{ + {State: corev1.ContainerState{Waiting: &corev1.ContainerStateWaiting{Reason: "CrashLoopBackOff"}}}, + }}, + }, + }, + } + done, reason, message := CheckVertexPodsStatus(&pods) + assert.Equal(t, "Pod test-pod is unhealthy", message) + assert.Equal(t, "PodCrashLoopBackOff", reason) + assert.False(t, done) + }) + + t.Run("Test Vertex status as false with no pods", func(t *testing.T) { + pods := corev1.PodList{ + Items: []corev1.Pod{}, + } + done, reason, message := CheckVertexPodsStatus(&pods) + assert.Equal(t, "No Pods found", message) + assert.Equal(t, "NoPodsFound", reason) + assert.True(t, done) + }) +} + +var ( + replicas int32 = 1 + deployment = appv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + }, + Spec: appv1.DeploymentSpec{ + Replicas: &replicas, + }, + Status: appv1.DeploymentStatus{ + ObservedGeneration: 1, + UpdatedReplicas: 1, + Replicas: 1, + AvailableReplicas: 1, + }, + } +) + +func TestGetDeploymentStatus(t *testing.T) { + t.Run("Test Deployment status as true", func(t *testing.T) { + testDeployment := deployment.DeepCopy() + done, reason, message := CheckDeploymentStatus(testDeployment) + assert.Equal(t, "Healthy", reason) + assert.True(t, done) + assert.Equal(t, "deployment \"test-deployment\" successfully rolled out", message) + }) + + t.Run("Test Deployment status as false", func(t *testing.T) { + testDeployment := deployment.DeepCopy() + testDeployment.Status.ObservedGeneration = 0 + testDeployment.Status.UpdatedReplicas = 0 + done, reason, message := CheckDeploymentStatus(testDeployment) + assert.Equal(t, "Progressing", reason) + assert.False(t, done) + assert.Equal(t, "Waiting for deployment \"test-deployment\" rollout to finish: 0 out of 1 new replicas have been updated...", message) + }) + + t.Run("Test deployment status as false while updating replica", func(t *testing.T) { + testDeployment := deployment.DeepCopy() 
+ testDeployment.Status.UpdatedReplicas = 1 + testDeployment.Status.Replicas = 2 + done, reason, message := CheckDeploymentStatus(testDeployment) + assert.Equal(t, "Progressing", reason) + assert.False(t, done) + assert.Equal(t, "Waiting for deployment \"test-deployment\" rollout to finish: 1 old replicas are pending termination...", message) + }) +} + +func TestGetVertexStatus(t *testing.T) { + t.Run("Test Vertex status as true", func(t *testing.T) { + vertices := dfv1.VertexList{ + Items: []dfv1.Vertex{ + { + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + Status: dfv1.VertexStatus{ + Phase: "Running", + ObservedGeneration: 1, + }, + }, + }, + } + vertices.Items[0].Status.Conditions = []metav1.Condition{ + { + Type: string(dfv1.VertexConditionPodsHealthy), + Status: metav1.ConditionTrue, + }, + } + status, reason, message := CheckVertexStatus(&vertices) + assert.True(t, status) + assert.Equal(t, "Healthy", reason) + assert.Equal(t, "All vertices are healthy", message) + }) + + t.Run("Test Vertex status as false when ObservedGeneration is not matching", func(t *testing.T) { + vertices := dfv1.VertexList{ + Items: []dfv1.Vertex{ + { + ObjectMeta: metav1.ObjectMeta{ + Generation: 2, + }, + Spec: dfv1.VertexSpec{ + AbstractVertex: dfv1.AbstractVertex{ + Name: "test-vertex", + }, + }, + Status: dfv1.VertexStatus{ + Phase: "Running", + ObservedGeneration: 1, + }, + }, + }, + } + vertices.Items[0].Status.Conditions = []metav1.Condition{ + { + Type: string(dfv1.VertexConditionPodsHealthy), + Status: metav1.ConditionTrue, + }, + } + status, reason, message := CheckVertexStatus(&vertices) + assert.False(t, status) + assert.Equal(t, "Progressing", reason) + assert.Equal(t, `Vertex "test-vertex" Waiting for reconciliation`, message) + }) + + t.Run("Test Vertex status as false", func(t *testing.T) { + vertices := dfv1.VertexList{ + Items: []dfv1.Vertex{ + { + ObjectMeta: metav1.ObjectMeta{ + Generation: 2, + }, + Spec: dfv1.VertexSpec{ + AbstractVertex: 
dfv1.AbstractVertex{ + Name: "test-vertex", + }, + }, + Status: dfv1.VertexStatus{ + Phase: "Pending", + ObservedGeneration: 2, + }, + }, + }, + } + vertices.Items[0].Status.Conditions = []metav1.Condition{ + { + Type: string(dfv1.VertexConditionPodsHealthy), + Status: metav1.ConditionTrue, + }, + } + status, reason, message := CheckVertexStatus(&vertices) + assert.False(t, status) + assert.Equal(t, "Unavailable", reason) + assert.Equal(t, `Vertex "test-vertex" is not healthy`, message) + }) +} + +var ( + statefulSet = &appv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-statefulset", + Namespace: "default", + }, + Status: appv1.StatefulSetStatus{ + AvailableReplicas: 3, + CurrentReplicas: 3, + CurrentRevision: "isbsvc-default-js-597b7f74d7", + ObservedGeneration: 1, + ReadyReplicas: 3, + Replicas: 3, + UpdateRevision: "isbsvc-default-js-597b7f74d7", + UpdatedReplicas: 3, + }, + } +) + +func TestGetStatefulSetStatus(t *testing.T) { + t.Run("Test statefulset status as true", func(t *testing.T) { + testSts := statefulSet.DeepCopy() + status, reason, msg := CheckStatefulSetStatus(testSts) + assert.Equal(t, "Healthy", reason) + assert.True(t, status) + assert.Equal(t, "statefulset rolling update complete 3 pods at revision isbsvc-default-js-597b7f74d7...\n", msg) + }) + + t.Run("Test statefulset status as false", func(t *testing.T) { + testSts := statefulSet.DeepCopy() + testSts.Status.UpdateRevision = "isbsvc-default-js-597b7f73a1" + status, reason, msg := CheckStatefulSetStatus(testSts) + assert.Equal(t, "Progressing", reason) + assert.False(t, status) + assert.Equal(t, "waiting for statefulset rolling update to complete 3 pods at revision isbsvc-default-js-597b7f73a1...", msg) + }) + + t.Run("Test statefulset with ObservedGeneration as zero", func(t *testing.T) { + testSts := statefulSet.DeepCopy() + testSts.Status.ObservedGeneration = 0 + status, reason, msg := CheckStatefulSetStatus(testSts) + assert.Equal(t, "Progressing", reason) + assert.False(t, 
status) + assert.Equal(t, "Waiting for statefulset spec update to be observed...", msg) + }) +} diff --git a/pkg/reconciler/vertex/controller.go b/pkg/reconciler/vertex/controller.go index a1ccd7dd62..a4a84d1e5e 100644 --- a/pkg/reconciler/vertex/controller.go +++ b/pkg/reconciler/vertex/controller.go @@ -95,6 +95,8 @@ func (r *vertexReconciler) reconcile(ctx context.Context, vertex *dfv1.Vertex) ( return ctrl.Result{}, nil } + vertex.Status.SetObservedGeneration(vertex.Generation) + isbSvc := &dfv1.InterStepBufferService{} isbSvcName := dfv1.DefaultISBSvcName if len(vertex.Spec.InterStepBufferServiceName) > 0 { @@ -147,7 +149,7 @@ func (r *vertexReconciler) reconcile(ctx context.Context, vertex *dfv1.Vertex) ( return ctrl.Result{}, err } if err := r.client.Create(ctx, newPvc); err != nil && !apierrors.IsAlreadyExists(err) { - r.markPhaseLogEvent(vertex, log, "CreatePVCFailed", err.Error(), "Error creating a PVC", zap.Error(err)) + r.markPhaseFailedAndLogEvent(vertex, log, "CreatePVCFailed", err.Error(), "Error creating a PVC", zap.Error(err)) return ctrl.Result{}, err } r.recorder.Eventf(vertex, corev1.EventTypeNormal, "CreatePVCSuccess", "Successfully created PVC %s", newPvc.Name) @@ -183,7 +185,7 @@ func (r *vertexReconciler) reconcile(ctx context.Context, vertex *dfv1.Vertex) ( if existingSvc.GetAnnotations()[dfv1.KeyHash] != svcHash { if err := r.client.Delete(ctx, &existingSvc); err != nil { if !apierrors.IsNotFound(err) { - r.markPhaseLogEvent(vertex, log, "DelSvcFailed", err.Error(), "Failed to delete existing service", zap.String("service", existingSvc.Name), zap.Error(err)) + r.markPhaseFailedAndLogEvent(vertex, log, "DelSvcFailed", err.Error(), "Failed to delete existing service", zap.String("service", existingSvc.Name), zap.Error(err)) return ctrl.Result{}, err } } else { @@ -201,7 +203,7 @@ func (r *vertexReconciler) reconcile(ctx context.Context, vertex *dfv1.Vertex) ( if apierrors.IsAlreadyExists(err) { continue } - r.markPhaseLogEvent(vertex, log, 
"CreateSvcFailed", err.Error(), "Failed to create a service", zap.String("service", s.Name), zap.Error(err)) + r.markPhaseFailedAndLogEvent(vertex, log, "CreateSvcFailed", err.Error(), "Failed to create a service", zap.String("service", s.Name), zap.Error(err)) return ctrl.Result{}, err } else { log.Infow("Succeeded to create a service", zap.String("service", s.Name)) @@ -212,7 +214,7 @@ func (r *vertexReconciler) reconcile(ctx context.Context, vertex *dfv1.Vertex) ( for _, v := range existingSvcs { // clean up stale services if err := r.client.Delete(ctx, &v); err != nil { if !apierrors.IsNotFound(err) { - r.markPhaseLogEvent(vertex, log, "DelSvcFailed", err.Error(), "Failed to delete service not in use", zap.String("service", v.Name), zap.Error(err)) + r.markPhaseFailedAndLogEvent(vertex, log, "DelSvcFailed", err.Error(), "Failed to delete service not in use", zap.String("service", v.Name), zap.Error(err)) return ctrl.Result{}, err } } else { @@ -295,7 +297,7 @@ func (r *vertexReconciler) reconcile(ctx context.Context, vertex *dfv1.Vertex) ( } pod.Spec.Hostname = fmt.Sprintf("%s-%d", vertex.Name, replica) if err := r.client.Create(ctx, pod); err != nil { - r.markPhaseLogEvent(vertex, log, "CreatePodFailed", err.Error(), "Failed to created pod", zap.Error(err)) + r.markPhaseFailedAndLogEvent(vertex, log, "CreatePodFailed", err.Error(), "Failed to created pod", zap.Error(err)) return ctrl.Result{}, err } log.Infow("Succeeded to create a pod", zap.String("pod", pod.Name)) @@ -304,7 +306,7 @@ func (r *vertexReconciler) reconcile(ctx context.Context, vertex *dfv1.Vertex) ( } for _, v := range existingPods { if err := r.client.Delete(ctx, &v); err != nil && !apierrors.IsNotFound(err) { - r.markPhaseLogEvent(vertex, log, "DelPodFailed", err.Error(), "Failed to delete pod", zap.Error(err)) + r.markPhaseFailedAndLogEvent(vertex, log, "DelPodFailed", err.Error(), "Failed to delete pod", zap.Error(err)) return ctrl.Result{}, err } } @@ -319,11 +321,22 @@ func (r 
*vertexReconciler) reconcile(ctx context.Context, vertex *dfv1.Vertex) ( selector, _ := labels.Parse(dfv1.KeyPipelineName + "=" + vertex.Spec.PipelineName + "," + dfv1.KeyVertexName + "=" + vertex.Spec.Name) vertex.Status.Selector = selector.String() + // Mark it running before checking the status of the pods vertex.Status.MarkPhaseRunning() - vertex.Status.SetObservedGeneration(vertex.Generation) - if err = checkChildrenResourceStatus(ctx, r.client, vertex); err != nil { - return ctrl.Result{}, fmt.Errorf("failed to check children resource status: %w", err) + + // Check status of the pods + var podList corev1.PodList + if err := r.client.List(ctx, &podList, &client.ListOptions{Namespace: vertex.GetNamespace(), LabelSelector: selector}); err != nil { + vertex.Status.MarkPodNotHealthy("ListVerticesPodsFailed", err.Error()) + return ctrl.Result{}, fmt.Errorf("failed to get pods of a vertex: %w", err) } + if healthy, reason, msg := reconciler.CheckVertexPodsStatus(&podList); healthy { + vertex.Status.MarkPodHealthy(reason, msg) + } else { + // Do not need to explicitly requeue, since the it keeps watching the status change of the pods + vertex.Status.MarkPodNotHealthy(reason, msg) + } + return ctrl.Result{}, nil } @@ -437,27 +450,9 @@ func (r *vertexReconciler) findExistingServices(ctx context.Context, vertex *dfv return result, nil } -// helper function for warning event types -func (r *vertexReconciler) markPhaseLogEvent(vertex *dfv1.Vertex, log *zap.SugaredLogger, reason, message, logMsg string, logWith ...interface{}) { +// Helper function for warning event types +func (r *vertexReconciler) markPhaseFailedAndLogEvent(vertex *dfv1.Vertex, log *zap.SugaredLogger, reason, message, logMsg string, logWith ...interface{}) { log.Errorw(logMsg, logWith) vertex.Status.MarkPhaseFailed(reason, message) r.recorder.Event(vertex, corev1.EventTypeWarning, reason, message) } - -func checkChildrenResourceStatus(ctx context.Context, c client.Client, vertex *dfv1.Vertex) error { - 
// fetch the pods for calculating the status of child resources - var podList corev1.PodList - selector, _ := labels.Parse(dfv1.KeyPipelineName + "=" + vertex.Spec.PipelineName + "," + dfv1.KeyVertexName + "=" + vertex.Spec.Name) - if err := c.List(ctx, &podList, &client.ListOptions{Namespace: vertex.GetNamespace(), LabelSelector: selector}); err != nil { - vertex.Status.MarkPodNotHealthy("ListVerticesPodsFailed", err.Error()) - return err - } - - if msg, reason, status := getVertexStatus(&podList); status { - vertex.Status.MarkPodHealthy(reason, msg) - } else { - vertex.Status.MarkPodNotHealthy(reason, msg) - } - - return nil -} diff --git a/pkg/reconciler/vertex/controller_test.go b/pkg/reconciler/vertex/controller_test.go index e37ab0de8e..f0f9128a8f 100644 --- a/pkg/reconciler/vertex/controller_test.go +++ b/pkg/reconciler/vertex/controller_test.go @@ -701,42 +701,3 @@ func getEvents(reconciler *vertexReconciler) []string { } return events } - -func Test_checkChildrenResourceStatus(t *testing.T) { - t.Run("test check children resource status", func(t *testing.T) { - cl := fake.NewClientBuilder().Build() - ctx := context.TODO() - testIsbSvc := testNativeRedisIsbSvc.DeepCopy() - testIsbSvc.Status.MarkConfigured() - testIsbSvc.Status.MarkDeployed() - err := cl.Create(ctx, testIsbSvc) - assert.Nil(t, err) - testPl := testPipeline.DeepCopy() - err = cl.Create(ctx, testPl) - assert.Nil(t, err) - r := &vertexReconciler{ - client: cl, - scheme: scheme.Scheme, - config: reconciler.FakeGlobalConfig(t, fakeGlobalISBSvcConfig), - image: testFlowImage, - scaler: scaling.NewScaler(cl), - logger: zaptest.NewLogger(t).Sugar(), - recorder: record.NewFakeRecorder(64), - } - testObj := testVertex.DeepCopy() - testObj.Spec.UDF = &dfv1.UDF{ - Builtin: &dfv1.Function{ - Name: "cat", - }, - } - _, err = r.reconcile(ctx, testObj) - assert.NoError(t, err) - err = checkChildrenResourceStatus(ctx, r.client, testObj) - assert.NoError(t, err) - for _, c := range testObj.Status.Conditions { 
- if c.Type == string(dfv1.VertexConditionPodsHealthy) { - assert.Equal(t, string(corev1.ConditionTrue), string(c.Status)) - } - } - }) -} diff --git a/pkg/reconciler/vertex/watcher.go b/pkg/reconciler/vertex/watcher.go deleted file mode 100644 index 4c2974ede7..0000000000 --- a/pkg/reconciler/vertex/watcher.go +++ /dev/null @@ -1,32 +0,0 @@ -package vertex - -import ( - "fmt" - - corev1 "k8s.io/api/core/v1" -) - -// getVertexStatus calculate the status by iterating over pods objects -func getVertexStatus(pods *corev1.PodList) (string, string, bool) { - // TODO: Need to revisit later. - if len(pods.Items) == 0 { - return "No Pods found", "NoPodsFound", true - } else { - for _, pod := range pods.Items { - if !isContainerHealthy(&pod) { - return fmt.Sprintf("Pod %s is not healthy", pod.Name), "CrashLoopBackOff", false - } - } - } - - return "All vertex pods are healthy", "Running", true -} - -func isContainerHealthy(pod *corev1.Pod) bool { - for _, c := range pod.Status.ContainerStatuses { - if c.State.Waiting != nil && c.State.Waiting.Reason == "CrashLoopBackOff" { - return false - } - } - return true -} diff --git a/pkg/reconciler/vertex/watcher_test.go b/pkg/reconciler/vertex/watcher_test.go deleted file mode 100644 index b750f7b2dc..0000000000 --- a/pkg/reconciler/vertex/watcher_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package vertex - -import ( - "testing" - - "github.com/stretchr/testify/assert" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestGetVertexStatus(t *testing.T) { - t.Run("Test Vertex status as true", func(t *testing.T) { - pods := corev1.PodList{Items: []corev1.Pod{ - {ObjectMeta: metav1.ObjectMeta{Name: "test-pod"}, Status: corev1.PodStatus{ - ContainerStatuses: []corev1.ContainerStatus{ - {State: corev1.ContainerState{Waiting: &corev1.ContainerStateWaiting{Reason: "Running"}}}, - }}, - }}, - } - message, reason, done := getVertexStatus(&pods) - assert.Equal(t, "All vertex pods are healthy", message) - 
assert.Equal(t, "Running", reason) - assert.True(t, done) - }) - - t.Run("Test Vertex status as false", func(t *testing.T) { - pods := corev1.PodList{ - Items: []corev1.Pod{ - {ObjectMeta: metav1.ObjectMeta{Name: "test-pod"}, Status: corev1.PodStatus{ - ContainerStatuses: []corev1.ContainerStatus{ - {State: corev1.ContainerState{Waiting: &corev1.ContainerStateWaiting{Reason: "CrashLoopBackOff"}}}, - }}, - }, - }, - } - message, reason, done := getVertexStatus(&pods) - assert.Equal(t, "Pod test-pod is not healthy", message) - assert.Equal(t, "CrashLoopBackOff", reason) - assert.False(t, done) - }) - - t.Run("Test Vertex status as false with no pods", func(t *testing.T) { - pods := corev1.PodList{ - Items: []corev1.Pod{}, - } - message, reason, done := getVertexStatus(&pods) - assert.Equal(t, "No Pods found", message) - assert.Equal(t, "NoPodsFound", reason) - assert.True(t, done) - }) -} diff --git a/pkg/sources/kafka/handler_test.go b/pkg/sources/kafka/handler_test.go index b5d4892312..a428b3c669 100644 --- a/pkg/sources/kafka/handler_test.go +++ b/pkg/sources/kafka/handler_test.go @@ -52,9 +52,7 @@ func TestMessageHandling(t *testing.T) { }} vi := &dfv1.VertexInstance{ - Vertex: vertex, - Hostname: "test-host", - Replica: 0, + Vertex: vertex, } ks := &kafkaSource{ diff --git a/pkg/sources/kafka/reader_test.go b/pkg/sources/kafka/reader_test.go index 4e43c496ee..2d9cd49cba 100644 --- a/pkg/sources/kafka/reader_test.go +++ b/pkg/sources/kafka/reader_test.go @@ -56,9 +56,7 @@ func TestKafkaSource_Read(t *testing.T) { }} vi := &dfv1.VertexInstance{ - Vertex: vertex, - Hostname: "test-host", - Replica: 0, + Vertex: vertex, } // Create a new Sarama mock client From bf4a269d3f7469ec52e587517495ad67443fd604 Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Mon, 5 Aug 2024 14:00:05 -0700 Subject: [PATCH 04/23] feat: Rust k8s model for Numaflow (#1898) Signed-off-by: Vigith Maurice Signed-off-by: Derek Wang Signed-off-by: Yashash H L Co-authored-by: Derek Wang 
Co-authored-by: Yashash H L --- .gitignore | 10 + Makefile | 3 +- serving/Cargo.toml | 2 +- serving/numaflow-models/Cargo.toml | 20 ++ serving/numaflow-models/Makefile | 63 +++++++ serving/numaflow-models/hack/swaggerfilter.py | 42 +++++ .../numaflow-models/src/apis/configuration.rs | 53 ++++++ serving/numaflow-models/src/apis/mod.rs | 94 ++++++++++ serving/numaflow-models/src/lib.rs | 10 + .../src/models/abstract_pod_template.rs | 75 ++++++++ .../src/models/abstract_sink.rs | 37 ++++ .../src/models/abstract_vertex.rs | 119 ++++++++++++ .../src/models/authorization.rs | 28 +++ .../numaflow-models/src/models/basic_auth.rs | 33 ++++ .../numaflow-models/src/models/blackhole.rs | 27 +++ .../src/models/buffer_service_config.rs | 31 ++++ .../src/models/combined_edge.rs | 62 +++++++ .../numaflow-models/src/models/container.rs | 54 ++++++ .../src/models/container_builder.rs | 115 ++++++++++++ .../src/models/container_template.rs | 42 +++++ .../src/models/daemon_template.rs | 83 +++++++++ serving/numaflow-models/src/models/edge.rs | 38 ++++ .../src/models/fixed_window.rs | 34 ++++ .../src/models/forward_conditions.rs | 28 +++ .../numaflow-models/src/models/function.rs | 34 ++++ .../src/models/generator_source.rs | 50 +++++ .../src/models/get_container_req.rs | 43 +++++ .../src/models/get_daemon_deployment_req.rs | 40 ++++ .../models/get_jet_stream_service_spec_req.rs | 40 ++++ .../get_jet_stream_stateful_set_spec_req.rs | 70 +++++++ .../src/models/get_redis_service_spec_req.rs | 34 ++++ .../models/get_redis_stateful_set_spec_req.rs | 73 ++++++++ .../models/get_side_input_deployment_req.rs | 40 ++++ .../src/models/get_vertex_pod_spec_req.rs | 49 +++++ .../numaflow-models/src/models/group_by.rs | 39 ++++ serving/numaflow-models/src/models/gssapi.rs | 63 +++++++ .../numaflow-models/src/models/http_source.rs | 32 ++++ .../numaflow-models/src/models/idle_source.rs | 34 ++++ .../src/models/inter_step_buffer_service.rs | 42 +++++ .../models/inter_step_buffer_service_list.rs | 41 
+++++ .../models/inter_step_buffer_service_spec.rs | 31 ++++ .../inter_step_buffer_service_status.rs | 44 +++++ .../src/models/jet_stream_buffer_service.rs | 113 ++++++++++++ .../src/models/jet_stream_config.rs | 39 ++++ .../src/models/jet_stream_source.rs | 39 ++++ .../src/models/job_template.rs | 84 +++++++++ .../numaflow-models/src/models/kafka_sink.rs | 40 ++++ .../src/models/kafka_source.rs | 43 +++++ .../numaflow-models/src/models/lifecycle.rs | 37 ++++ serving/numaflow-models/src/models/log.rs | 25 +++ .../numaflow-models/src/models/metadata.rs | 31 ++++ serving/numaflow-models/src/models/mod.rs | 172 ++++++++++++++++++ .../src/models/native_redis.rs | 99 ++++++++++ .../numaflow-models/src/models/nats_auth.rs | 36 ++++ .../numaflow-models/src/models/nats_source.rs | 43 +++++ .../numaflow-models/src/models/no_store.rs | 27 +++ .../numaflow-models/src/models/pbq_storage.rs | 36 ++++ .../src/models/persistence_strategy.rs | 38 ++++ .../numaflow-models/src/models/pipeline.rs | 42 +++++ .../src/models/pipeline_limits.rs | 40 ++++ .../src/models/pipeline_list.rs | 39 ++++ .../src/models/pipeline_spec.rs | 51 ++++++ .../src/models/pipeline_status.rs | 59 ++++++ .../src/models/redis_buffer_service.rs | 31 ++++ .../src/models/redis_config.rs | 47 +++++ .../src/models/redis_settings.rs | 41 +++++ serving/numaflow-models/src/models/sasl.rs | 41 +++++ .../numaflow-models/src/models/sasl_plain.rs | 34 ++++ serving/numaflow-models/src/models/scale.rs | 71 ++++++++ .../src/models/serving_source.rs | 41 +++++ .../src/models/serving_store.rs | 34 ++++ .../src/models/session_window.rs | 30 +++ .../numaflow-models/src/models/side_input.rs | 39 ++++ .../src/models/side_input_trigger.rs | 32 ++++ .../models/side_inputs_manager_template.rs | 79 ++++++++ serving/numaflow-models/src/models/sink.rs | 40 ++++ .../src/models/sliding_window.rs | 37 ++++ serving/numaflow-models/src/models/source.rs | 49 +++++ serving/numaflow-models/src/models/status.rs | 31 ++++ 
.../src/models/tag_conditions.rs | 33 ++++ .../numaflow-models/src/models/templates.rs | 37 ++++ serving/numaflow-models/src/models/tls.rs | 37 ++++ .../numaflow-models/src/models/transformer.rs | 34 ++++ serving/numaflow-models/src/models/ud_sink.rs | 28 +++ .../numaflow-models/src/models/ud_source.rs | 28 +++ .../src/models/ud_transformer.rs | 31 ++++ serving/numaflow-models/src/models/udf.rs | 34 ++++ serving/numaflow-models/src/models/vertex.rs | 42 +++++ .../src/models/vertex_instance.rs | 36 ++++ .../src/models/vertex_limits.rs | 40 ++++ .../numaflow-models/src/models/vertex_list.rs | 39 ++++ .../numaflow-models/src/models/vertex_spec.rs | 137 ++++++++++++++ .../src/models/vertex_status.rs | 50 +++++ .../src/models/vertex_template.rs | 79 ++++++++ .../numaflow-models/src/models/watermark.rs | 35 ++++ serving/numaflow-models/src/models/window.rs | 36 ++++ 96 files changed, 4466 insertions(+), 2 deletions(-) create mode 100644 serving/numaflow-models/Cargo.toml create mode 100644 serving/numaflow-models/Makefile create mode 100755 serving/numaflow-models/hack/swaggerfilter.py create mode 100644 serving/numaflow-models/src/apis/configuration.rs create mode 100644 serving/numaflow-models/src/apis/mod.rs create mode 100644 serving/numaflow-models/src/lib.rs create mode 100644 serving/numaflow-models/src/models/abstract_pod_template.rs create mode 100644 serving/numaflow-models/src/models/abstract_sink.rs create mode 100644 serving/numaflow-models/src/models/abstract_vertex.rs create mode 100644 serving/numaflow-models/src/models/authorization.rs create mode 100644 serving/numaflow-models/src/models/basic_auth.rs create mode 100644 serving/numaflow-models/src/models/blackhole.rs create mode 100644 serving/numaflow-models/src/models/buffer_service_config.rs create mode 100644 serving/numaflow-models/src/models/combined_edge.rs create mode 100644 serving/numaflow-models/src/models/container.rs create mode 100644 serving/numaflow-models/src/models/container_builder.rs 
create mode 100644 serving/numaflow-models/src/models/container_template.rs create mode 100644 serving/numaflow-models/src/models/daemon_template.rs create mode 100644 serving/numaflow-models/src/models/edge.rs create mode 100644 serving/numaflow-models/src/models/fixed_window.rs create mode 100644 serving/numaflow-models/src/models/forward_conditions.rs create mode 100644 serving/numaflow-models/src/models/function.rs create mode 100644 serving/numaflow-models/src/models/generator_source.rs create mode 100644 serving/numaflow-models/src/models/get_container_req.rs create mode 100644 serving/numaflow-models/src/models/get_daemon_deployment_req.rs create mode 100644 serving/numaflow-models/src/models/get_jet_stream_service_spec_req.rs create mode 100644 serving/numaflow-models/src/models/get_jet_stream_stateful_set_spec_req.rs create mode 100644 serving/numaflow-models/src/models/get_redis_service_spec_req.rs create mode 100644 serving/numaflow-models/src/models/get_redis_stateful_set_spec_req.rs create mode 100644 serving/numaflow-models/src/models/get_side_input_deployment_req.rs create mode 100644 serving/numaflow-models/src/models/get_vertex_pod_spec_req.rs create mode 100644 serving/numaflow-models/src/models/group_by.rs create mode 100644 serving/numaflow-models/src/models/gssapi.rs create mode 100644 serving/numaflow-models/src/models/http_source.rs create mode 100644 serving/numaflow-models/src/models/idle_source.rs create mode 100644 serving/numaflow-models/src/models/inter_step_buffer_service.rs create mode 100644 serving/numaflow-models/src/models/inter_step_buffer_service_list.rs create mode 100644 serving/numaflow-models/src/models/inter_step_buffer_service_spec.rs create mode 100644 serving/numaflow-models/src/models/inter_step_buffer_service_status.rs create mode 100644 serving/numaflow-models/src/models/jet_stream_buffer_service.rs create mode 100644 serving/numaflow-models/src/models/jet_stream_config.rs create mode 100644 
serving/numaflow-models/src/models/jet_stream_source.rs create mode 100644 serving/numaflow-models/src/models/job_template.rs create mode 100644 serving/numaflow-models/src/models/kafka_sink.rs create mode 100644 serving/numaflow-models/src/models/kafka_source.rs create mode 100644 serving/numaflow-models/src/models/lifecycle.rs create mode 100644 serving/numaflow-models/src/models/log.rs create mode 100644 serving/numaflow-models/src/models/metadata.rs create mode 100644 serving/numaflow-models/src/models/mod.rs create mode 100644 serving/numaflow-models/src/models/native_redis.rs create mode 100644 serving/numaflow-models/src/models/nats_auth.rs create mode 100644 serving/numaflow-models/src/models/nats_source.rs create mode 100644 serving/numaflow-models/src/models/no_store.rs create mode 100644 serving/numaflow-models/src/models/pbq_storage.rs create mode 100644 serving/numaflow-models/src/models/persistence_strategy.rs create mode 100644 serving/numaflow-models/src/models/pipeline.rs create mode 100644 serving/numaflow-models/src/models/pipeline_limits.rs create mode 100644 serving/numaflow-models/src/models/pipeline_list.rs create mode 100644 serving/numaflow-models/src/models/pipeline_spec.rs create mode 100644 serving/numaflow-models/src/models/pipeline_status.rs create mode 100644 serving/numaflow-models/src/models/redis_buffer_service.rs create mode 100644 serving/numaflow-models/src/models/redis_config.rs create mode 100644 serving/numaflow-models/src/models/redis_settings.rs create mode 100644 serving/numaflow-models/src/models/sasl.rs create mode 100644 serving/numaflow-models/src/models/sasl_plain.rs create mode 100644 serving/numaflow-models/src/models/scale.rs create mode 100644 serving/numaflow-models/src/models/serving_source.rs create mode 100644 serving/numaflow-models/src/models/serving_store.rs create mode 100644 serving/numaflow-models/src/models/session_window.rs create mode 100644 serving/numaflow-models/src/models/side_input.rs create mode 
100644 serving/numaflow-models/src/models/side_input_trigger.rs create mode 100644 serving/numaflow-models/src/models/side_inputs_manager_template.rs create mode 100644 serving/numaflow-models/src/models/sink.rs create mode 100644 serving/numaflow-models/src/models/sliding_window.rs create mode 100644 serving/numaflow-models/src/models/source.rs create mode 100644 serving/numaflow-models/src/models/status.rs create mode 100644 serving/numaflow-models/src/models/tag_conditions.rs create mode 100644 serving/numaflow-models/src/models/templates.rs create mode 100644 serving/numaflow-models/src/models/tls.rs create mode 100644 serving/numaflow-models/src/models/transformer.rs create mode 100644 serving/numaflow-models/src/models/ud_sink.rs create mode 100644 serving/numaflow-models/src/models/ud_source.rs create mode 100644 serving/numaflow-models/src/models/ud_transformer.rs create mode 100644 serving/numaflow-models/src/models/udf.rs create mode 100644 serving/numaflow-models/src/models/vertex.rs create mode 100644 serving/numaflow-models/src/models/vertex_instance.rs create mode 100644 serving/numaflow-models/src/models/vertex_limits.rs create mode 100644 serving/numaflow-models/src/models/vertex_list.rs create mode 100644 serving/numaflow-models/src/models/vertex_spec.rs create mode 100644 serving/numaflow-models/src/models/vertex_status.rs create mode 100644 serving/numaflow-models/src/models/vertex_template.rs create mode 100644 serving/numaflow-models/src/models/watermark.rs create mode 100644 serving/numaflow-models/src/models/window.rs diff --git a/.gitignore b/.gitignore index 076b62f98b..a4acf2f150 100644 --- a/.gitignore +++ b/.gitignore @@ -17,6 +17,16 @@ site/ *.pytest_cache/ docs/APIs.html +# openapi +*/numaflow-models/docs +*/numaflow-models/.gitignore +*/numaflow-models/.openapi-generator +*/numaflow-models/.openapi-generator-ignore +*/numaflow-models/.travis.yml +*/numaflow-models/README.md +*/numaflow-models/git_push.sh 
+*/numaflow-models/openapitools.json + # Generated by Cargo # will have compiled files and executables debug/ diff --git a/Makefile b/Makefile index b3db2062d5..e4a7b170c5 100644 --- a/Makefile +++ b/Makefile @@ -195,6 +195,7 @@ codegen: $(MAKE) manifests rm -rf ./vendor go mod tidy + $(MAKE) --directory serving/numaflow-models generate clean: -rm -rf ${CURRENT_DIR}/dist @@ -322,4 +323,4 @@ update-manifests-version: cat config/extensions/webhook/kustomization.yaml | sed 's/newTag: .*/newTag: $(VERSION)/' | sed 's@value: quay.io/numaproj/numaflow:.*@value: quay.io/numaproj/numaflow:$(VERSION)@' > /tmp/tmp_kustomization.yaml mv /tmp/tmp_kustomization.yaml config/extensions/webhook/kustomization.yaml cat Makefile | sed 's/^VERSION?=.*/VERSION?=$(VERSION)/' | sed 's/^BASE_VERSION:=.*/BASE_VERSION:=$(VERSION)/' > /tmp/ae_makefile - mv /tmp/ae_makefile Makefile \ No newline at end of file + mv /tmp/ae_makefile Makefile diff --git a/serving/Cargo.toml b/serving/Cargo.toml index b4969586e4..6353aac742 100644 --- a/serving/Cargo.toml +++ b/serving/Cargo.toml @@ -1,4 +1,4 @@ -workspace = { members = ["backoff", "extras/upstreams", "servesink"] } +workspace = { members = ["backoff", "extras/upstreams", "numaflow-models", "servesink"] } [package] name = "serve" version = "0.1.0" diff --git a/serving/numaflow-models/Cargo.toml b/serving/numaflow-models/Cargo.toml new file mode 100644 index 0000000000..1585b7ee0e --- /dev/null +++ b/serving/numaflow-models/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "numaflow-models" +version = "0.0.0-pre" +authors = ["Numaflow Developers"] +description = "No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)" +# Override this license by providing a License Object in the OpenAPI. 
+license = "Apache License 2.0" +edition = "2021" + +[dependencies] +k8s-openapi = { version = "0.22.0", features = ["v1_29"] } +kube = "0.93.1" +serde = "^1.0" +serde_derive = "^1.0" +serde_json = "^1.0" +url = "^2.2" +uuid = { version = "^1.0", features = ["serde", "v4"] } +[dependencies.reqwest] +version = "^0.11" +features = ["json", "multipart"] diff --git a/serving/numaflow-models/Makefile b/serving/numaflow-models/Makefile new file mode 100644 index 0000000000..9e82883f4d --- /dev/null +++ b/serving/numaflow-models/Makefile @@ -0,0 +1,63 @@ +SHELL:=/bin/bash + +VERSION ?= main + +SDK_VERSION := $(shell if [[ "$(VERSION)" =~ ^v[0-9]+\.[0-9]+\.[0-9]+.* ]]; then echo ${VERSION} | cut -c2-; elif [[ "$(VERSION)" =~ ^main$ ]]; then echo 0.0.0-pre; else echo $(VERSION); fi) + +# Somehow type-mappings stopped working starting from v7.4.0 +GENERATOR_VERSION := v7.3.0 + +DOCKER = docker run --rm -v `pwd -P`:/base --workdir /base + +publish: generate + echo TODO + +generate: + rm -Rf ./docs ./test ./numaflow/models/ ./numaflow/model/ + mkdir -p ./dist + cat ../../api/openapi-spec/swagger.json | ./hack/swaggerfilter.py io.numaproj.numaflow | \ + sed 's/io.k8s.api.core.v1./CoreV1/' | \ + sed 's/io.k8s.apimachinery.pkg.apis.meta.v1./MetaV1/' | \ + sed 's/io.k8s.apimachinery.pkg.api.resource.Quantity/ResourceQuantity/' | \ + sed 's/io.numaproj.numaflow.v1alpha1.//' \ + > ./dist/swagger.json + $(DOCKER) openapitools/openapi-generator-cli:$(GENERATOR_VERSION) \ + generate \ + -i /base/dist/swagger.json \ + -g rust \ + -o /base \ + --remove-operation-id-prefix \ + --model-name-prefix '' \ + --model-name-suffix '' \ + --additional-properties packageName=numaflow-models \ + --additional-properties packageVersion=${SDK_VERSION} \ + --type-mappings CoreV1Affinity="k8s_openapi::api::core::v1::Affinity" \ + --type-mappings CoreV1SecretKeySelector="k8s_openapi::api::core::v1::SecretKeySelector" \ + --type-mappings CoreV1EnvVar="k8s_openapi::api::core::v1::EnvVar" \ + --type-mappings 
CoreV1EnvFromSource="k8s_openapi::api::core::v1::EnvFromSource" \ + --type-mappings CoreV1Lifecycle="k8s_openapi::api::core::v1::Lifecycle" \ + --type-mappings CoreV1Probe="k8s_openapi::api::core::v1::Probe" \ + --type-mappings CoreV1ContainerPort="k8s_openapi::api::core::v1::ContainerPort" \ + --type-mappings CoreV1ResourceRequirements="k8s_openapi::api::core::v1::ResourceRequirements" \ + --type-mappings CoreV1SecurityContext="k8s_openapi::api::core::v1::SecurityContext" \ + --type-mappings CoreV1VolumeDevice="k8s_openapi::api::core::v1::VolumeDevice" \ + --type-mappings CoreV1VolumeMount="k8s_openapi::api::core::v1::VolumeMount" \ + --type-mappings CoreV1Toleration="k8s_openapi::api::core::v1::Toleration" \ + --type-mappings CoreV1PodSecurityContext="k8s_openapi::api::core::v1::PodSecurityContext" \ + --type-mappings CoreV1LocalObjectReference="k8s_openapi::api::core::v1::LocalObjectReference" \ + --type-mappings CoreV1PodDNSConfig="k8s_openapi::api::core::v1::PodDNSConfig" \ + --type-mappings CoreV1ContainerResizePolicy="k8s_openapi::api::core::v1::ContainerResizePolicy" \ + --type-mappings CoreV1Container="k8s_openapi::api::core::v1::Container" \ + --type-mappings CoreV1Volume="k8s_openapi::api::core::v1::Volume" \ + --type-mappings CoreV1EmptyDirVolumeSource="k8s_openapi::api::core::v1::EmptyDirVolumeSource" \ + --type-mappings MetaV1Duration="kube::core::Duration" \ + --type-mappings MetaV1ListMeta="k8s_openapi::apimachinery::pkg::apis::meta::v1::ListMeta" \ + --type-mappings MetaV1Condition="k8s_openapi::apimachinery::pkg::apis::meta::v1::Condition" \ + --type-mappings MetaV1Time="k8s_openapi::apimachinery::pkg::apis::meta::v1::Time" \ + --type-mappings MetaV1ObjectMeta="k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta" \ + --type-mappings ResourceQuantity="k8s_openapi::apimachinery::pkg::api::resource::Quantity" \ + --generate-alias-as-model + + sed -e 's/edition = "2018"/edition = "2021"/g' -e 's/authors =.*/authors = \["Numaflow Developers"\]/' 
-e 's/license =.*/license = "Apache License 2.0"/' Cargo.toml > tmp && mv tmp Cargo.toml + cargo add kube + cargo add k8s-openapi --features v1_29 diff --git a/serving/numaflow-models/hack/swaggerfilter.py b/serving/numaflow-models/hack/swaggerfilter.py new file mode 100755 index 0000000000..58db014524 --- /dev/null +++ b/serving/numaflow-models/hack/swaggerfilter.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python3 + +# +# Filter swagger file +# + +import os +from os import path +import sys +import json + + +def main(): + if sys.stdin.isatty(): + print('ERROR: swagger json needs to be piped in as stdin') + exit(1) + if len(sys.argv) < 2: + print('ERROR: definition prefix needs to be provided') + exit(1) + prefix = sys.argv[1] + + try: + swagger = json.load(sys.stdin) + except Exception as e: + print("ERROR: not a valid json input - {0}".format(e)) + exit(1) + + defs = swagger["definitions"] + for k in list(defs.keys()): + if not k.startswith(prefix): + del defs[k] + continue + + if k in ['io.numaproj.numaflow.v1alpha1.Blackhole', 'io.numaproj.numaflow.v1alpha1.Log', 'io.numaproj.numaflow.v1alpha1.NoStore']: + defs[k]['allOf'] = [] + + json_object = json.dumps(swagger, indent=4) + print(json_object) + + +if __name__ == "__main__": + main() diff --git a/serving/numaflow-models/src/apis/configuration.rs b/serving/numaflow-models/src/apis/configuration.rs new file mode 100644 index 0000000000..5c65cc5721 --- /dev/null +++ b/serving/numaflow-models/src/apis/configuration.rs @@ -0,0 +1,53 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + +#[derive(Debug, Clone)] +pub struct Configuration { + pub base_path: String, + pub user_agent: Option, + pub client: reqwest::Client, + pub basic_auth: Option, + pub oauth_access_token: Option, + pub bearer_access_token: Option, + pub api_key: 
Option, + // TODO: take an oauth2 token source, similar to the go one +} + +pub type BasicAuth = (String, Option); + +#[derive(Debug, Clone)] +pub struct ApiKey { + pub prefix: Option, + pub key: String, +} + + +impl Configuration { + pub fn new() -> Configuration { + Configuration::default() + } +} + +impl Default for Configuration { + fn default() -> Self { + Configuration { + base_path: "http://localhost".to_owned(), + user_agent: Some("OpenAPI-Generator/latest/rust".to_owned()), + client: reqwest::Client::new(), + basic_auth: None, + oauth_access_token: None, + bearer_access_token: None, + api_key: None, + + } + } +} diff --git a/serving/numaflow-models/src/apis/mod.rs b/serving/numaflow-models/src/apis/mod.rs new file mode 100644 index 0000000000..dccbc940fb --- /dev/null +++ b/serving/numaflow-models/src/apis/mod.rs @@ -0,0 +1,94 @@ +use std::error; +use std::fmt; + +#[derive(Debug, Clone)] +pub struct ResponseContent { + pub status: reqwest::StatusCode, + pub content: String, + pub entity: Option, +} + +#[derive(Debug)] +pub enum Error { + Reqwest(reqwest::Error), + Serde(serde_json::Error), + Io(std::io::Error), + ResponseError(ResponseContent), +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let (module, e) = match self { + Error::Reqwest(e) => ("reqwest", e.to_string()), + Error::Serde(e) => ("serde", e.to_string()), + Error::Io(e) => ("IO", e.to_string()), + Error::ResponseError(e) => ("response", format!("status code {}", e.status)), + }; + write!(f, "error in {}: {}", module, e) + } +} + +impl error::Error for Error { + fn source(&self) -> Option<&(dyn error::Error + 'static)> { + Some(match self { + Error::Reqwest(e) => e, + Error::Serde(e) => e, + Error::Io(e) => e, + Error::ResponseError(_) => return None, + }) + } +} + +impl From for Error { + fn from(e: reqwest::Error) -> Self { + Error::Reqwest(e) + } +} + +impl From for Error { + fn from(e: serde_json::Error) -> Self { + Error::Serde(e) + } +} + 
+impl From for Error { + fn from(e: std::io::Error) -> Self { + Error::Io(e) + } +} + +pub fn urlencode>(s: T) -> String { + ::url::form_urlencoded::byte_serialize(s.as_ref().as_bytes()).collect() +} + +pub fn parse_deep_object(prefix: &str, value: &serde_json::Value) -> Vec<(String, String)> { + if let serde_json::Value::Object(object) = value { + let mut params = vec![]; + + for (key, value) in object { + match value { + serde_json::Value::Object(_) => params.append(&mut parse_deep_object( + &format!("{}[{}]", prefix, key), + value, + )), + serde_json::Value::Array(array) => { + for (i, value) in array.iter().enumerate() { + params.append(&mut parse_deep_object( + &format!("{}[{}][{}]", prefix, key, i), + value, + )); + } + }, + serde_json::Value::String(s) => params.push((format!("{}[{}]", prefix, key), s.clone())), + _ => params.push((format!("{}[{}]", prefix, key), value.to_string())), + } + } + + return params; + } + + unimplemented!("Only objects are supported with style=deepObject") +} + + +pub mod configuration; diff --git a/serving/numaflow-models/src/lib.rs b/serving/numaflow-models/src/lib.rs new file mode 100644 index 0000000000..c1dd666f79 --- /dev/null +++ b/serving/numaflow-models/src/lib.rs @@ -0,0 +1,10 @@ +#[macro_use] +extern crate serde_derive; + +extern crate serde; +extern crate serde_json; +extern crate url; +extern crate reqwest; + +pub mod apis; +pub mod models; diff --git a/serving/numaflow-models/src/models/abstract_pod_template.rs b/serving/numaflow-models/src/models/abstract_pod_template.rs new file mode 100644 index 0000000000..fd19a7b425 --- /dev/null +++ b/serving/numaflow-models/src/models/abstract_pod_template.rs @@ -0,0 +1,75 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +/// AbstractPodTemplate : AbstractPodTemplate provides a 
template for pod customization in vertices, daemon deployments and so on. + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct AbstractPodTemplate { + #[serde(rename = "affinity", skip_serializing_if = "Option::is_none")] + pub affinity: Option, + /// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + #[serde(rename = "automountServiceAccountToken", skip_serializing_if = "Option::is_none")] + pub automount_service_account_token: Option, + #[serde(rename = "dnsConfig", skip_serializing_if = "Option::is_none")] + pub dns_config: Option, + /// Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + #[serde(rename = "dnsPolicy", skip_serializing_if = "Option::is_none")] + pub dns_policy: Option, + /// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + #[serde(rename = "imagePullSecrets", skip_serializing_if = "Option::is_none")] + pub image_pull_secrets: Option>, + #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] + pub metadata: Option>, + /// NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. 
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + #[serde(rename = "nodeSelector", skip_serializing_if = "Option::is_none")] + pub node_selector: Option<::std::collections::HashMap>, + /// The priority value. Various system components use this field to find the priority of the Redis pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + #[serde(rename = "priority", skip_serializing_if = "Option::is_none")] + pub priority: Option, + /// If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + #[serde(rename = "priorityClassName", skip_serializing_if = "Option::is_none")] + pub priority_class_name: Option, + /// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. 
More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + #[serde(rename = "runtimeClassName", skip_serializing_if = "Option::is_none")] + pub runtime_class_name: Option, + #[serde(rename = "securityContext", skip_serializing_if = "Option::is_none")] + pub security_context: Option, + /// ServiceAccountName applied to the pod + #[serde(rename = "serviceAccountName", skip_serializing_if = "Option::is_none")] + pub service_account_name: Option, + /// If specified, the pod's tolerations. + #[serde(rename = "tolerations", skip_serializing_if = "Option::is_none")] + pub tolerations: Option>, +} + +impl AbstractPodTemplate { + /// AbstractPodTemplate provides a template for pod customization in vertices, daemon deployments and so on. + pub fn new() -> AbstractPodTemplate { + AbstractPodTemplate { + affinity: None, + automount_service_account_token: None, + dns_config: None, + dns_policy: None, + image_pull_secrets: None, + metadata: None, + node_selector: None, + priority: None, + priority_class_name: None, + runtime_class_name: None, + security_context: None, + service_account_name: None, + tolerations: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/abstract_sink.rs b/serving/numaflow-models/src/models/abstract_sink.rs new file mode 100644 index 0000000000..48bf8c8276 --- /dev/null +++ b/serving/numaflow-models/src/models/abstract_sink.rs @@ -0,0 +1,37 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct AbstractSink { + #[serde(rename = "blackhole", skip_serializing_if = "Option::is_none")] + pub blackhole: Option>, + #[serde(rename = "kafka", skip_serializing_if = "Option::is_none")] + pub kafka: Option>, + #[serde(rename = "log", skip_serializing_if = 
"Option::is_none")] + pub log: Option>, + #[serde(rename = "udsink", skip_serializing_if = "Option::is_none")] + pub udsink: Option>, +} + +impl AbstractSink { + pub fn new() -> AbstractSink { + AbstractSink { + blackhole: None, + kafka: None, + log: None, + udsink: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/abstract_vertex.rs b/serving/numaflow-models/src/models/abstract_vertex.rs new file mode 100644 index 0000000000..1ee864662e --- /dev/null +++ b/serving/numaflow-models/src/models/abstract_vertex.rs @@ -0,0 +1,119 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct AbstractVertex { + #[serde(rename = "affinity", skip_serializing_if = "Option::is_none")] + pub affinity: Option, + /// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + #[serde(rename = "automountServiceAccountToken", skip_serializing_if = "Option::is_none")] + pub automount_service_account_token: Option, + #[serde(rename = "containerTemplate", skip_serializing_if = "Option::is_none")] + pub container_template: Option>, + #[serde(rename = "dnsConfig", skip_serializing_if = "Option::is_none")] + pub dns_config: Option, + /// Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. 
+ #[serde(rename = "dnsPolicy", skip_serializing_if = "Option::is_none")] + pub dns_policy: Option, + /// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + #[serde(rename = "imagePullSecrets", skip_serializing_if = "Option::is_none")] + pub image_pull_secrets: Option>, + #[serde(rename = "initContainerTemplate", skip_serializing_if = "Option::is_none")] + pub init_container_template: Option>, + /// List of customized init containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + #[serde(rename = "initContainers", skip_serializing_if = "Option::is_none")] + pub init_containers: Option>, + #[serde(rename = "limits", skip_serializing_if = "Option::is_none")] + pub limits: Option>, + #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] + pub metadata: Option>, + #[serde(rename = "name")] + pub name: String, + /// NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + #[serde(rename = "nodeSelector", skip_serializing_if = "Option::is_none")] + pub node_selector: Option<::std::collections::HashMap>, + /// Number of partitions of the vertex owned buffers. It applies to udf and sink vertices only. + #[serde(rename = "partitions", skip_serializing_if = "Option::is_none")] + pub partitions: Option, + /// The priority value. Various system components use this field to find the priority of the Redis pod. 
When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + #[serde(rename = "priority", skip_serializing_if = "Option::is_none")] + pub priority: Option, + /// If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + #[serde(rename = "priorityClassName", skip_serializing_if = "Option::is_none")] + pub priority_class_name: Option, + /// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + #[serde(rename = "runtimeClassName", skip_serializing_if = "Option::is_none")] + pub runtime_class_name: Option, + #[serde(rename = "scale", skip_serializing_if = "Option::is_none")] + pub scale: Option>, + #[serde(rename = "securityContext", skip_serializing_if = "Option::is_none")] + pub security_context: Option, + /// ServiceAccountName applied to the pod + #[serde(rename = "serviceAccountName", skip_serializing_if = "Option::is_none")] + pub service_account_name: Option, + /// Names of the side inputs used in this vertex. 
+ #[serde(rename = "sideInputs", skip_serializing_if = "Option::is_none")] + pub side_inputs: Option>, + #[serde(rename = "sideInputsContainerTemplate", skip_serializing_if = "Option::is_none")] + pub side_inputs_container_template: Option>, + /// List of customized sidecar containers belonging to the pod. + #[serde(rename = "sidecars", skip_serializing_if = "Option::is_none")] + pub sidecars: Option>, + #[serde(rename = "sink", skip_serializing_if = "Option::is_none")] + pub sink: Option>, + #[serde(rename = "source", skip_serializing_if = "Option::is_none")] + pub source: Option>, + /// If specified, the pod's tolerations. + #[serde(rename = "tolerations", skip_serializing_if = "Option::is_none")] + pub tolerations: Option>, + #[serde(rename = "udf", skip_serializing_if = "Option::is_none")] + pub udf: Option>, + #[serde(rename = "volumes", skip_serializing_if = "Option::is_none")] + pub volumes: Option>, +} + +impl AbstractVertex { + pub fn new(name: String) -> AbstractVertex { + AbstractVertex { + affinity: None, + automount_service_account_token: None, + container_template: None, + dns_config: None, + dns_policy: None, + image_pull_secrets: None, + init_container_template: None, + init_containers: None, + limits: None, + metadata: None, + name, + node_selector: None, + partitions: None, + priority: None, + priority_class_name: None, + runtime_class_name: None, + scale: None, + security_context: None, + service_account_name: None, + side_inputs: None, + side_inputs_container_template: None, + sidecars: None, + sink: None, + source: None, + tolerations: None, + udf: None, + volumes: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/authorization.rs b/serving/numaflow-models/src/models/authorization.rs new file mode 100644 index 0000000000..6589dc82f4 --- /dev/null +++ b/serving/numaflow-models/src/models/authorization.rs @@ -0,0 +1,28 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator 
https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Authorization { + #[serde(rename = "token", skip_serializing_if = "Option::is_none")] + pub token: Option, +} + +impl Authorization { + pub fn new() -> Authorization { + Authorization { + token: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/basic_auth.rs b/serving/numaflow-models/src/models/basic_auth.rs new file mode 100644 index 0000000000..e7a4e7d2c7 --- /dev/null +++ b/serving/numaflow-models/src/models/basic_auth.rs @@ -0,0 +1,33 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +/// BasicAuth : BasicAuth represents the basic authentication approach which contains a user name and a password. + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct BasicAuth { + #[serde(rename = "password", skip_serializing_if = "Option::is_none")] + pub password: Option, + #[serde(rename = "user", skip_serializing_if = "Option::is_none")] + pub user: Option, +} + +impl BasicAuth { + /// BasicAuth represents the basic authentication approach which contains a user name and a password. 
+ pub fn new() -> BasicAuth { + BasicAuth { + password: None, + user: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/blackhole.rs b/serving/numaflow-models/src/models/blackhole.rs new file mode 100644 index 0000000000..f84586c042 --- /dev/null +++ b/serving/numaflow-models/src/models/blackhole.rs @@ -0,0 +1,27 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +/// Blackhole : Blackhole is a sink to emulate /dev/null + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Blackhole { +} + +impl Blackhole { + /// Blackhole is a sink to emulate /dev/null + pub fn new() -> Blackhole { + Blackhole { + } + } +} + + diff --git a/serving/numaflow-models/src/models/buffer_service_config.rs b/serving/numaflow-models/src/models/buffer_service_config.rs new file mode 100644 index 0000000000..bad5eec796 --- /dev/null +++ b/serving/numaflow-models/src/models/buffer_service_config.rs @@ -0,0 +1,31 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct BufferServiceConfig { + #[serde(rename = "jetstream", skip_serializing_if = "Option::is_none")] + pub jetstream: Option>, + #[serde(rename = "redis", skip_serializing_if = "Option::is_none")] + pub redis: Option>, +} + +impl BufferServiceConfig { + pub fn new() -> BufferServiceConfig { + BufferServiceConfig { + jetstream: None, + redis: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/combined_edge.rs b/serving/numaflow-models/src/models/combined_edge.rs new file mode 100644 index 0000000000..b7fdf05446 --- 
/dev/null +++ b/serving/numaflow-models/src/models/combined_edge.rs @@ -0,0 +1,62 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +/// CombinedEdge : CombinedEdge is a combination of Edge and some other properties such as vertex type, partitions, limits. It's used to decorate the fromEdges and toEdges of the generated Vertex objects, so that in the vertex pod, it knows the properties of the connected vertices, for example, how many partitioned buffers I should write to, what is the write buffer length, etc. + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct CombinedEdge { + #[serde(rename = "conditions", skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + #[serde(rename = "from")] + pub from: String, + #[serde(rename = "fromVertexLimits", skip_serializing_if = "Option::is_none")] + pub from_vertex_limits: Option>, + /// The number of partitions of the from vertex, if not provided, the default value is set to \"1\". + #[serde(rename = "fromVertexPartitionCount", skip_serializing_if = "Option::is_none")] + pub from_vertex_partition_count: Option, + /// From vertex type. + #[serde(rename = "fromVertexType")] + pub from_vertex_type: String, + /// OnFull specifies the behaviour for the write actions when the inter step buffer is full. There are currently two options, retryUntilSuccess and discardLatest. if not provided, the default value is set to \"retryUntilSuccess\" + #[serde(rename = "onFull", skip_serializing_if = "Option::is_none")] + pub on_full: Option, + #[serde(rename = "to")] + pub to: String, + #[serde(rename = "toVertexLimits", skip_serializing_if = "Option::is_none")] + pub to_vertex_limits: Option>, + /// The number of partitions of the to vertex, if not provided, the default value is set to \"1\". 
+ #[serde(rename = "toVertexPartitionCount", skip_serializing_if = "Option::is_none")] + pub to_vertex_partition_count: Option, + /// To vertex type. + #[serde(rename = "toVertexType")] + pub to_vertex_type: String, +} + +impl CombinedEdge { + /// CombinedEdge is a combination of Edge and some other properties such as vertex type, partitions, limits. It's used to decorate the fromEdges and toEdges of the generated Vertex objects, so that in the vertex pod, it knows the properties of the connected vertices, for example, how many partitioned buffers I should write to, what is the write buffer length, etc. + pub fn new(from: String, from_vertex_type: String, to: String, to_vertex_type: String) -> CombinedEdge { + CombinedEdge { + conditions: None, + from, + from_vertex_limits: None, + from_vertex_partition_count: None, + from_vertex_type, + on_full: None, + to, + to_vertex_limits: None, + to_vertex_partition_count: None, + to_vertex_type, + } + } +} + + diff --git a/serving/numaflow-models/src/models/container.rs b/serving/numaflow-models/src/models/container.rs new file mode 100644 index 0000000000..22d8643134 --- /dev/null +++ b/serving/numaflow-models/src/models/container.rs @@ -0,0 +1,54 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +/// Container : Container is used to define the container properties for user-defined functions, sinks, etc. 
+ + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Container { + #[serde(rename = "args", skip_serializing_if = "Option::is_none")] + pub args: Option>, + #[serde(rename = "command", skip_serializing_if = "Option::is_none")] + pub command: Option>, + #[serde(rename = "env", skip_serializing_if = "Option::is_none")] + pub env: Option>, + #[serde(rename = "envFrom", skip_serializing_if = "Option::is_none")] + pub env_from: Option>, + #[serde(rename = "image", skip_serializing_if = "Option::is_none")] + pub image: Option, + #[serde(rename = "imagePullPolicy", skip_serializing_if = "Option::is_none")] + pub image_pull_policy: Option, + #[serde(rename = "resources", skip_serializing_if = "Option::is_none")] + pub resources: Option, + #[serde(rename = "securityContext", skip_serializing_if = "Option::is_none")] + pub security_context: Option, + #[serde(rename = "volumeMounts", skip_serializing_if = "Option::is_none")] + pub volume_mounts: Option>, +} + +impl Container { + /// Container is used to define the container properties for user-defined functions, sinks, etc. + pub fn new() -> Container { + Container { + args: None, + command: None, + env: None, + env_from: None, + image: None, + image_pull_policy: None, + resources: None, + security_context: None, + volume_mounts: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/container_builder.rs b/serving/numaflow-models/src/models/container_builder.rs new file mode 100644 index 0000000000..47a84c6fe7 --- /dev/null +++ b/serving/numaflow-models/src/models/container_builder.rs @@ -0,0 +1,115 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct ContainerBuilder { + /// Arguments to the entrypoint. 
The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + #[serde(rename = "args", skip_serializing_if = "Option::is_none")] + pub args: Option>, + /// Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + #[serde(rename = "command", skip_serializing_if = "Option::is_none")] + pub command: Option>, + /// List of environment variables to set in the container. Cannot be updated. + #[serde(rename = "env", skip_serializing_if = "Option::is_none")] + pub env: Option>, + /// List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. 
Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + #[serde(rename = "envFrom", skip_serializing_if = "Option::is_none")] + pub env_from: Option>, + /// Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. + #[serde(rename = "image", skip_serializing_if = "Option::is_none")] + pub image: Option, + /// Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + #[serde(rename = "imagePullPolicy", skip_serializing_if = "Option::is_none")] + pub image_pull_policy: Option, + #[serde(rename = "lifecycle", skip_serializing_if = "Option::is_none")] + pub lifecycle: Option, + #[serde(rename = "livenessProbe", skip_serializing_if = "Option::is_none")] + pub liveness_probe: Option, + /// Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + #[serde(rename = "name")] + pub name: String, + /// List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. + #[serde(rename = "ports", skip_serializing_if = "Option::is_none")] + pub ports: Option>, + #[serde(rename = "readinessProbe", skip_serializing_if = "Option::is_none")] + pub readiness_probe: Option, + /// Resources resize policy for the container. 
+ #[serde(rename = "resizePolicy", skip_serializing_if = "Option::is_none")] + pub resize_policy: Option>, + #[serde(rename = "resources", skip_serializing_if = "Option::is_none")] + pub resources: Option, + /// RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is \"Always\". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed. + #[serde(rename = "restartPolicy", skip_serializing_if = "Option::is_none")] + pub restart_policy: Option, + #[serde(rename = "securityContext", skip_serializing_if = "Option::is_none")] + pub security_context: Option, + #[serde(rename = "startupProbe", skip_serializing_if = "Option::is_none")] + pub startup_probe: Option, + /// Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + #[serde(rename = "stdin", skip_serializing_if = "Option::is_none")] + pub stdin: Option, + /// Whether the container runtime should close the stdin channel after it has been opened by a single attach. 
When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false + #[serde(rename = "stdinOnce", skip_serializing_if = "Option::is_none")] + pub stdin_once: Option, + /// Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. + #[serde(rename = "terminationMessagePath", skip_serializing_if = "Option::is_none")] + pub termination_message_path: Option, + /// Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + #[serde(rename = "terminationMessagePolicy", skip_serializing_if = "Option::is_none")] + pub termination_message_policy: Option, + /// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + #[serde(rename = "tty", skip_serializing_if = "Option::is_none")] + pub tty: Option, + /// volumeDevices is the list of block devices to be used by the container. 
+ #[serde(rename = "volumeDevices", skip_serializing_if = "Option::is_none")] + pub volume_devices: Option>, + /// Pod volumes to mount into the container's filesystem. Cannot be updated. + #[serde(rename = "volumeMounts", skip_serializing_if = "Option::is_none")] + pub volume_mounts: Option>, + /// Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + #[serde(rename = "workingDir", skip_serializing_if = "Option::is_none")] + pub working_dir: Option, +} + +impl ContainerBuilder { + pub fn new(name: String) -> ContainerBuilder { + ContainerBuilder { + args: None, + command: None, + env: None, + env_from: None, + image: None, + image_pull_policy: None, + lifecycle: None, + liveness_probe: None, + name, + ports: None, + readiness_probe: None, + resize_policy: None, + resources: None, + restart_policy: None, + security_context: None, + startup_probe: None, + stdin: None, + stdin_once: None, + termination_message_path: None, + termination_message_policy: None, + tty: None, + volume_devices: None, + volume_mounts: None, + working_dir: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/container_template.rs b/serving/numaflow-models/src/models/container_template.rs new file mode 100644 index 0000000000..737fbf7701 --- /dev/null +++ b/serving/numaflow-models/src/models/container_template.rs @@ -0,0 +1,42 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +/// ContainerTemplate : ContainerTemplate defines customized spec for a container + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct ContainerTemplate { + #[serde(rename = "env", skip_serializing_if = "Option::is_none")] + pub env: Option>, + #[serde(rename = "envFrom", 
skip_serializing_if = "Option::is_none")] + pub env_from: Option>, + #[serde(rename = "imagePullPolicy", skip_serializing_if = "Option::is_none")] + pub image_pull_policy: Option, + #[serde(rename = "resources", skip_serializing_if = "Option::is_none")] + pub resources: Option, + #[serde(rename = "securityContext", skip_serializing_if = "Option::is_none")] + pub security_context: Option, +} + +impl ContainerTemplate { + /// ContainerTemplate defines customized spec for a container + pub fn new() -> ContainerTemplate { + ContainerTemplate { + env: None, + env_from: None, + image_pull_policy: None, + resources: None, + security_context: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/daemon_template.rs b/serving/numaflow-models/src/models/daemon_template.rs new file mode 100644 index 0000000000..82c8e4ecc5 --- /dev/null +++ b/serving/numaflow-models/src/models/daemon_template.rs @@ -0,0 +1,83 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct DaemonTemplate { + #[serde(rename = "affinity", skip_serializing_if = "Option::is_none")] + pub affinity: Option, + /// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + #[serde(rename = "automountServiceAccountToken", skip_serializing_if = "Option::is_none")] + pub automount_service_account_token: Option, + #[serde(rename = "containerTemplate", skip_serializing_if = "Option::is_none")] + pub container_template: Option>, + #[serde(rename = "dnsConfig", skip_serializing_if = "Option::is_none")] + pub dns_config: Option, + /// Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. 
DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + #[serde(rename = "dnsPolicy", skip_serializing_if = "Option::is_none")] + pub dns_policy: Option, + /// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + #[serde(rename = "imagePullSecrets", skip_serializing_if = "Option::is_none")] + pub image_pull_secrets: Option>, + #[serde(rename = "initContainerTemplate", skip_serializing_if = "Option::is_none")] + pub init_container_template: Option>, + #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] + pub metadata: Option>, + /// NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + #[serde(rename = "nodeSelector", skip_serializing_if = "Option::is_none")] + pub node_selector: Option<::std::collections::HashMap>, + /// The priority value. Various system components use this field to find the priority of the Redis pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. 
More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + #[serde(rename = "priority", skip_serializing_if = "Option::is_none")] + pub priority: Option, + /// If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + #[serde(rename = "priorityClassName", skip_serializing_if = "Option::is_none")] + pub priority_class_name: Option, + /// Replicas is the number of desired replicas of the Deployment. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller + #[serde(rename = "replicas", skip_serializing_if = "Option::is_none")] + pub replicas: Option, + /// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. 
More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + #[serde(rename = "runtimeClassName", skip_serializing_if = "Option::is_none")] + pub runtime_class_name: Option, + #[serde(rename = "securityContext", skip_serializing_if = "Option::is_none")] + pub security_context: Option, + /// ServiceAccountName applied to the pod + #[serde(rename = "serviceAccountName", skip_serializing_if = "Option::is_none")] + pub service_account_name: Option, + /// If specified, the pod's tolerations. + #[serde(rename = "tolerations", skip_serializing_if = "Option::is_none")] + pub tolerations: Option>, +} + +impl DaemonTemplate { + pub fn new() -> DaemonTemplate { + DaemonTemplate { + affinity: None, + automount_service_account_token: None, + container_template: None, + dns_config: None, + dns_policy: None, + image_pull_secrets: None, + init_container_template: None, + metadata: None, + node_selector: None, + priority: None, + priority_class_name: None, + replicas: None, + runtime_class_name: None, + security_context: None, + service_account_name: None, + tolerations: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/edge.rs b/serving/numaflow-models/src/models/edge.rs new file mode 100644 index 0000000000..33afab8014 --- /dev/null +++ b/serving/numaflow-models/src/models/edge.rs @@ -0,0 +1,38 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Edge { + #[serde(rename = "conditions", skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + #[serde(rename = "from")] + pub from: String, + /// OnFull specifies the behaviour for the write actions when the inter step buffer is full. There are currently two options, retryUntilSuccess and discardLatest. 
if not provided, the default value is set to \"retryUntilSuccess\" + #[serde(rename = "onFull", skip_serializing_if = "Option::is_none")] + pub on_full: Option, + #[serde(rename = "to")] + pub to: String, +} + +impl Edge { + pub fn new(from: String, to: String) -> Edge { + Edge { + conditions: None, + from, + on_full: None, + to, + } + } +} + + diff --git a/serving/numaflow-models/src/models/fixed_window.rs b/serving/numaflow-models/src/models/fixed_window.rs new file mode 100644 index 0000000000..e62eee6d1a --- /dev/null +++ b/serving/numaflow-models/src/models/fixed_window.rs @@ -0,0 +1,34 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +/// FixedWindow : FixedWindow describes a fixed window + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct FixedWindow { + #[serde(rename = "length", skip_serializing_if = "Option::is_none")] + pub length: Option, + /// Streaming should be set to true if the reduce udf is streaming. 
+ #[serde(rename = "streaming", skip_serializing_if = "Option::is_none")] + pub streaming: Option, +} + +impl FixedWindow { + /// FixedWindow describes a fixed window + pub fn new() -> FixedWindow { + FixedWindow { + length: None, + streaming: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/forward_conditions.rs b/serving/numaflow-models/src/models/forward_conditions.rs new file mode 100644 index 0000000000..c75cb03634 --- /dev/null +++ b/serving/numaflow-models/src/models/forward_conditions.rs @@ -0,0 +1,28 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct ForwardConditions { + #[serde(rename = "tags")] + pub tags: Box, +} + +impl ForwardConditions { + pub fn new(tags: crate::models::TagConditions) -> ForwardConditions { + ForwardConditions { + tags: Box::new(tags), + } + } +} + + diff --git a/serving/numaflow-models/src/models/function.rs b/serving/numaflow-models/src/models/function.rs new file mode 100644 index 0000000000..147fd444fe --- /dev/null +++ b/serving/numaflow-models/src/models/function.rs @@ -0,0 +1,34 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Function { + #[serde(rename = "args", skip_serializing_if = "Option::is_none")] + pub args: Option>, + #[serde(rename = "kwargs", skip_serializing_if = "Option::is_none")] + pub kwargs: Option<::std::collections::HashMap>, + #[serde(rename = "name")] + pub name: String, +} + +impl Function { + pub fn new(name: String) -> Function { + Function 
{ + args: None, + kwargs: None, + name, + } + } +} + + diff --git a/serving/numaflow-models/src/models/generator_source.rs b/serving/numaflow-models/src/models/generator_source.rs new file mode 100644 index 0000000000..d22bb877da --- /dev/null +++ b/serving/numaflow-models/src/models/generator_source.rs @@ -0,0 +1,50 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct GeneratorSource { + #[serde(rename = "duration", skip_serializing_if = "Option::is_none")] + pub duration: Option, + #[serde(rename = "jitter", skip_serializing_if = "Option::is_none")] + pub jitter: Option, + /// KeyCount is the number of unique keys in the payload + #[serde(rename = "keyCount", skip_serializing_if = "Option::is_none")] + pub key_count: Option, + /// Size of each generated message + #[serde(rename = "msgSize", skip_serializing_if = "Option::is_none")] + pub msg_size: Option, + #[serde(rename = "rpu", skip_serializing_if = "Option::is_none")] + pub rpu: Option, + /// Value is an optional uint64 value to be written in to the payload + #[serde(rename = "value", skip_serializing_if = "Option::is_none")] + pub value: Option, + /// ValueBlob is an optional string which is the base64 encoding of direct payload to send. This is useful for attaching a GeneratorSource to a true pipeline to test load behavior with true messages without requiring additional work to generate messages through the external source if present, the Value and MsgSize fields will be ignored. 
+ #[serde(rename = "valueBlob", skip_serializing_if = "Option::is_none")] + pub value_blob: Option, +} + +impl GeneratorSource { + pub fn new() -> GeneratorSource { + GeneratorSource { + duration: None, + jitter: None, + key_count: None, + msg_size: None, + rpu: None, + value: None, + value_blob: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/get_container_req.rs b/serving/numaflow-models/src/models/get_container_req.rs new file mode 100644 index 0000000000..5b2695f98f --- /dev/null +++ b/serving/numaflow-models/src/models/get_container_req.rs @@ -0,0 +1,43 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct GetContainerReq { + #[serde(rename = "env")] + pub env: Vec, + #[serde(rename = "image")] + pub image: String, + #[serde(rename = "imagePullPolicy")] + pub image_pull_policy: String, + #[serde(rename = "isbSvcType")] + pub isb_svc_type: String, + #[serde(rename = "resources")] + pub resources: k8s_openapi::api::core::v1::ResourceRequirements, + #[serde(rename = "volumeMounts")] + pub volume_mounts: Vec, +} + +impl GetContainerReq { + pub fn new(env: Vec, image: String, image_pull_policy: String, isb_svc_type: String, resources: k8s_openapi::api::core::v1::ResourceRequirements, volume_mounts: Vec) -> GetContainerReq { + GetContainerReq { + env, + image, + image_pull_policy, + isb_svc_type, + resources, + volume_mounts, + } + } +} + + diff --git a/serving/numaflow-models/src/models/get_daemon_deployment_req.rs b/serving/numaflow-models/src/models/get_daemon_deployment_req.rs new file mode 100644 index 0000000000..4f8a7cc3b9 --- /dev/null +++ b/serving/numaflow-models/src/models/get_daemon_deployment_req.rs @@ -0,0 +1,40 @@ +/* + * Numaflow + * + * No description provided 
(generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct GetDaemonDeploymentReq { + #[serde(rename = "DefaultResources")] + pub default_resources: k8s_openapi::api::core::v1::ResourceRequirements, + #[serde(rename = "Env")] + pub env: Vec, + #[serde(rename = "ISBSvcType")] + pub isb_svc_type: String, + #[serde(rename = "Image")] + pub image: String, + #[serde(rename = "PullPolicy")] + pub pull_policy: String, +} + +impl GetDaemonDeploymentReq { + pub fn new(default_resources: k8s_openapi::api::core::v1::ResourceRequirements, env: Vec, isb_svc_type: String, image: String, pull_policy: String) -> GetDaemonDeploymentReq { + GetDaemonDeploymentReq { + default_resources, + env, + isb_svc_type, + image, + pull_policy, + } + } +} + + diff --git a/serving/numaflow-models/src/models/get_jet_stream_service_spec_req.rs b/serving/numaflow-models/src/models/get_jet_stream_service_spec_req.rs new file mode 100644 index 0000000000..9c7891afd4 --- /dev/null +++ b/serving/numaflow-models/src/models/get_jet_stream_service_spec_req.rs @@ -0,0 +1,40 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct GetJetStreamServiceSpecReq { + #[serde(rename = "ClientPort")] + pub client_port: i32, + #[serde(rename = "ClusterPort")] + pub cluster_port: i32, + #[serde(rename = "Labels")] + pub labels: ::std::collections::HashMap, + #[serde(rename = "MetricsPort")] + pub metrics_port: i32, + #[serde(rename = "MonitorPort")] + pub monitor_port: i32, +} + +impl GetJetStreamServiceSpecReq { + pub fn new(client_port: i32, 
cluster_port: i32, labels: ::std::collections::HashMap, metrics_port: i32, monitor_port: i32) -> GetJetStreamServiceSpecReq { + GetJetStreamServiceSpecReq { + client_port, + cluster_port, + labels, + metrics_port, + monitor_port, + } + } +} + + diff --git a/serving/numaflow-models/src/models/get_jet_stream_stateful_set_spec_req.rs b/serving/numaflow-models/src/models/get_jet_stream_stateful_set_spec_req.rs new file mode 100644 index 0000000000..894ae495fd --- /dev/null +++ b/serving/numaflow-models/src/models/get_jet_stream_stateful_set_spec_req.rs @@ -0,0 +1,70 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct GetJetStreamStatefulSetSpecReq { + #[serde(rename = "ClientPort")] + pub client_port: i32, + #[serde(rename = "ClusterPort")] + pub cluster_port: i32, + #[serde(rename = "ConfigMapName")] + pub config_map_name: String, + #[serde(rename = "ConfigReloaderImage")] + pub config_reloader_image: String, + #[serde(rename = "DefaultResources")] + pub default_resources: k8s_openapi::api::core::v1::ResourceRequirements, + #[serde(rename = "Labels")] + pub labels: ::std::collections::HashMap, + #[serde(rename = "MetricsExporterImage")] + pub metrics_exporter_image: String, + #[serde(rename = "MetricsPort")] + pub metrics_port: i32, + #[serde(rename = "MonitorPort")] + pub monitor_port: i32, + #[serde(rename = "NatsImage")] + pub nats_image: String, + #[serde(rename = "PvcNameIfNeeded")] + pub pvc_name_if_needed: String, + #[serde(rename = "ServerAuthSecretName")] + pub server_auth_secret_name: String, + #[serde(rename = "ServerEncryptionSecretName")] + pub server_encryption_secret_name: String, + #[serde(rename = "ServiceName")] + pub service_name: String, + #[serde(rename = "StartCommand")] + pub 
start_command: String, +} + +impl GetJetStreamStatefulSetSpecReq { + pub fn new(client_port: i32, cluster_port: i32, config_map_name: String, config_reloader_image: String, default_resources: k8s_openapi::api::core::v1::ResourceRequirements, labels: ::std::collections::HashMap, metrics_exporter_image: String, metrics_port: i32, monitor_port: i32, nats_image: String, pvc_name_if_needed: String, server_auth_secret_name: String, server_encryption_secret_name: String, service_name: String, start_command: String) -> GetJetStreamStatefulSetSpecReq { + GetJetStreamStatefulSetSpecReq { + client_port, + cluster_port, + config_map_name, + config_reloader_image, + default_resources, + labels, + metrics_exporter_image, + metrics_port, + monitor_port, + nats_image, + pvc_name_if_needed, + server_auth_secret_name, + server_encryption_secret_name, + service_name, + start_command, + } + } +} + + diff --git a/serving/numaflow-models/src/models/get_redis_service_spec_req.rs b/serving/numaflow-models/src/models/get_redis_service_spec_req.rs new file mode 100644 index 0000000000..651a67d08e --- /dev/null +++ b/serving/numaflow-models/src/models/get_redis_service_spec_req.rs @@ -0,0 +1,34 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct GetRedisServiceSpecReq { + #[serde(rename = "Labels")] + pub labels: ::std::collections::HashMap, + #[serde(rename = "RedisContainerPort")] + pub redis_container_port: i32, + #[serde(rename = "SentinelContainerPort")] + pub sentinel_container_port: i32, +} + +impl GetRedisServiceSpecReq { + pub fn new(labels: ::std::collections::HashMap, redis_container_port: i32, sentinel_container_port: i32) -> GetRedisServiceSpecReq { + GetRedisServiceSpecReq { + labels, + redis_container_port, + 
sentinel_container_port, + } + } +} + + diff --git a/serving/numaflow-models/src/models/get_redis_stateful_set_spec_req.rs b/serving/numaflow-models/src/models/get_redis_stateful_set_spec_req.rs new file mode 100644 index 0000000000..736ea82fa4 --- /dev/null +++ b/serving/numaflow-models/src/models/get_redis_stateful_set_spec_req.rs @@ -0,0 +1,73 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct GetRedisStatefulSetSpecReq { + #[serde(rename = "ConfConfigMapName")] + pub conf_config_map_name: String, + #[serde(rename = "CredentialSecretName")] + pub credential_secret_name: String, + #[serde(rename = "DefaultResources")] + pub default_resources: k8s_openapi::api::core::v1::ResourceRequirements, + #[serde(rename = "HealthConfigMapName")] + pub health_config_map_name: String, + #[serde(rename = "InitContainerImage")] + pub init_container_image: String, + #[serde(rename = "Labels")] + pub labels: ::std::collections::HashMap, + #[serde(rename = "MetricsExporterImage")] + pub metrics_exporter_image: String, + #[serde(rename = "PvcNameIfNeeded")] + pub pvc_name_if_needed: String, + #[serde(rename = "RedisContainerPort")] + pub redis_container_port: i32, + #[serde(rename = "RedisImage")] + pub redis_image: String, + #[serde(rename = "RedisMetricsContainerPort")] + pub redis_metrics_container_port: i32, + #[serde(rename = "ScriptsConfigMapName")] + pub scripts_config_map_name: String, + #[serde(rename = "SentinelContainerPort")] + pub sentinel_container_port: i32, + #[serde(rename = "SentinelImage")] + pub sentinel_image: String, + #[serde(rename = "ServiceName")] + pub service_name: String, + #[serde(rename = "TLSEnabled")] + pub tls_enabled: bool, +} + +impl GetRedisStatefulSetSpecReq { + pub fn 
new(conf_config_map_name: String, credential_secret_name: String, default_resources: k8s_openapi::api::core::v1::ResourceRequirements, health_config_map_name: String, init_container_image: String, labels: ::std::collections::HashMap, metrics_exporter_image: String, pvc_name_if_needed: String, redis_container_port: i32, redis_image: String, redis_metrics_container_port: i32, scripts_config_map_name: String, sentinel_container_port: i32, sentinel_image: String, service_name: String, tls_enabled: bool) -> GetRedisStatefulSetSpecReq { + GetRedisStatefulSetSpecReq { + conf_config_map_name, + credential_secret_name, + default_resources, + health_config_map_name, + init_container_image, + labels, + metrics_exporter_image, + pvc_name_if_needed, + redis_container_port, + redis_image, + redis_metrics_container_port, + scripts_config_map_name, + sentinel_container_port, + sentinel_image, + service_name, + tls_enabled, + } + } +} + + diff --git a/serving/numaflow-models/src/models/get_side_input_deployment_req.rs b/serving/numaflow-models/src/models/get_side_input_deployment_req.rs new file mode 100644 index 0000000000..a2d389d107 --- /dev/null +++ b/serving/numaflow-models/src/models/get_side_input_deployment_req.rs @@ -0,0 +1,40 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct GetSideInputDeploymentReq { + #[serde(rename = "DefaultResources")] + pub default_resources: k8s_openapi::api::core::v1::ResourceRequirements, + #[serde(rename = "Env")] + pub env: Vec, + #[serde(rename = "ISBSvcType")] + pub isb_svc_type: String, + #[serde(rename = "Image")] + pub image: String, + #[serde(rename = "PullPolicy")] + pub pull_policy: String, +} + +impl GetSideInputDeploymentReq { + pub fn new(default_resources: 
k8s_openapi::api::core::v1::ResourceRequirements, env: Vec, isb_svc_type: String, image: String, pull_policy: String) -> GetSideInputDeploymentReq { + GetSideInputDeploymentReq { + default_resources, + env, + isb_svc_type, + image, + pull_policy, + } + } +} + + diff --git a/serving/numaflow-models/src/models/get_vertex_pod_spec_req.rs b/serving/numaflow-models/src/models/get_vertex_pod_spec_req.rs new file mode 100644 index 0000000000..26f6185385 --- /dev/null +++ b/serving/numaflow-models/src/models/get_vertex_pod_spec_req.rs @@ -0,0 +1,49 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct GetVertexPodSpecReq { + #[serde(rename = "DefaultResources")] + pub default_resources: k8s_openapi::api::core::v1::ResourceRequirements, + #[serde(rename = "Env")] + pub env: Vec, + #[serde(rename = "ISBSvcType")] + pub isb_svc_type: String, + #[serde(rename = "Image")] + pub image: String, + #[serde(rename = "PipelineSpec")] + pub pipeline_spec: Box, + #[serde(rename = "PullPolicy")] + pub pull_policy: String, + #[serde(rename = "ServingSourceStreamName")] + pub serving_source_stream_name: String, + #[serde(rename = "SideInputsStoreName")] + pub side_inputs_store_name: String, +} + +impl GetVertexPodSpecReq { + pub fn new(default_resources: k8s_openapi::api::core::v1::ResourceRequirements, env: Vec, isb_svc_type: String, image: String, pipeline_spec: crate::models::PipelineSpec, pull_policy: String, serving_source_stream_name: String, side_inputs_store_name: String) -> GetVertexPodSpecReq { + GetVertexPodSpecReq { + default_resources, + env, + isb_svc_type, + image, + pipeline_spec: Box::new(pipeline_spec), + pull_policy, + serving_source_stream_name, + side_inputs_store_name, + } + } +} + + diff --git 
a/serving/numaflow-models/src/models/group_by.rs b/serving/numaflow-models/src/models/group_by.rs new file mode 100644 index 0000000000..605b3bc61a --- /dev/null +++ b/serving/numaflow-models/src/models/group_by.rs @@ -0,0 +1,39 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +/// GroupBy : GroupBy indicates it is a reducer UDF + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct GroupBy { + #[serde(rename = "allowedLateness", skip_serializing_if = "Option::is_none")] + pub allowed_lateness: Option, + #[serde(rename = "keyed", skip_serializing_if = "Option::is_none")] + pub keyed: Option, + #[serde(rename = "storage", skip_serializing_if = "Option::is_none")] + pub storage: Option>, + #[serde(rename = "window")] + pub window: Box, +} + +impl GroupBy { + /// GroupBy indicates it is a reducer UDF + pub fn new(window: crate::models::Window) -> GroupBy { + GroupBy { + allowed_lateness: None, + keyed: None, + storage: None, + window: Box::new(window), + } + } +} + + diff --git a/serving/numaflow-models/src/models/gssapi.rs b/serving/numaflow-models/src/models/gssapi.rs new file mode 100644 index 0000000000..c5fe33aad6 --- /dev/null +++ b/serving/numaflow-models/src/models/gssapi.rs @@ -0,0 +1,63 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +/// Gssapi : GSSAPI represents a SASL GSSAPI config + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Gssapi { + /// valid inputs - KRB5_USER_AUTH, KRB5_KEYTAB_AUTH Possible enum values: - `\"KRB5_KEYTAB_AUTH\"` represents the password method KRB5KeytabAuth = \"KRB5_KEYTAB_AUTH\" = 2 - 
`\"KRB5_USER_AUTH\"` represents the password method KRB5UserAuth = \"KRB5_USER_AUTH\" = 1 + #[serde(rename = "authType")] + pub auth_type: AuthType, + #[serde(rename = "kerberosConfigSecret", skip_serializing_if = "Option::is_none")] + pub kerberos_config_secret: Option, + #[serde(rename = "keytabSecret", skip_serializing_if = "Option::is_none")] + pub keytab_secret: Option, + #[serde(rename = "passwordSecret", skip_serializing_if = "Option::is_none")] + pub password_secret: Option, + #[serde(rename = "realm")] + pub realm: String, + #[serde(rename = "serviceName")] + pub service_name: String, + #[serde(rename = "usernameSecret")] + pub username_secret: k8s_openapi::api::core::v1::SecretKeySelector, +} + +impl Gssapi { + /// GSSAPI represents a SASL GSSAPI config + pub fn new(auth_type: AuthType, realm: String, service_name: String, username_secret: k8s_openapi::api::core::v1::SecretKeySelector) -> Gssapi { + Gssapi { + auth_type, + kerberos_config_secret: None, + keytab_secret: None, + password_secret: None, + realm, + service_name, + username_secret, + } + } +} + +/// valid inputs - KRB5_USER_AUTH, KRB5_KEYTAB_AUTH Possible enum values: - `\"KRB5_KEYTAB_AUTH\"` represents the password method KRB5KeytabAuth = \"KRB5_KEYTAB_AUTH\" = 2 - `\"KRB5_USER_AUTH\"` represents the password method KRB5UserAuth = \"KRB5_USER_AUTH\" = 1 +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +pub enum AuthType { + #[serde(rename = "KRB5_KEYTAB_AUTH")] + KeytabAuth, + #[serde(rename = "KRB5_USER_AUTH")] + UserAuth, +} + +impl Default for AuthType { + fn default() -> AuthType { + Self::KeytabAuth + } +} + diff --git a/serving/numaflow-models/src/models/http_source.rs b/serving/numaflow-models/src/models/http_source.rs new file mode 100644 index 0000000000..8a5b7c7c97 --- /dev/null +++ b/serving/numaflow-models/src/models/http_source.rs @@ -0,0 +1,32 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator 
https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct HttpSource { + #[serde(rename = "auth", skip_serializing_if = "Option::is_none")] + pub auth: Option>, + /// Whether to create a ClusterIP Service + #[serde(rename = "service", skip_serializing_if = "Option::is_none")] + pub service: Option, +} + +impl HttpSource { + pub fn new() -> HttpSource { + HttpSource { + auth: None, + service: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/idle_source.rs b/serving/numaflow-models/src/models/idle_source.rs new file mode 100644 index 0000000000..3d593c5375 --- /dev/null +++ b/serving/numaflow-models/src/models/idle_source.rs @@ -0,0 +1,34 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct IdleSource { + #[serde(rename = "incrementBy", skip_serializing_if = "Option::is_none")] + pub increment_by: Option, + #[serde(rename = "stepInterval", skip_serializing_if = "Option::is_none")] + pub step_interval: Option, + #[serde(rename = "threshold", skip_serializing_if = "Option::is_none")] + pub threshold: Option, +} + +impl IdleSource { + pub fn new() -> IdleSource { + IdleSource { + increment_by: None, + step_interval: None, + threshold: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/inter_step_buffer_service.rs b/serving/numaflow-models/src/models/inter_step_buffer_service.rs new file mode 100644 index 0000000000..fc6154b756 --- /dev/null +++ b/serving/numaflow-models/src/models/inter_step_buffer_service.rs @@ -0,0 +1,42 @@ +/* + * Numaflow + * + * No description provided (generated 
by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct InterStepBufferService { + /// APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + #[serde(rename = "apiVersion", skip_serializing_if = "Option::is_none")] + pub api_version: Option, + /// Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + #[serde(rename = "kind", skip_serializing_if = "Option::is_none")] + pub kind: Option, + #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] + pub metadata: Option, + #[serde(rename = "spec")] + pub spec: Box, + #[serde(rename = "status", skip_serializing_if = "Option::is_none")] + pub status: Option>, +} + +impl InterStepBufferService { + pub fn new(spec: crate::models::InterStepBufferServiceSpec) -> InterStepBufferService { + InterStepBufferService { + api_version: None, + kind: None, + metadata: None, + spec: Box::new(spec), + status: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/inter_step_buffer_service_list.rs b/serving/numaflow-models/src/models/inter_step_buffer_service_list.rs new file mode 100644 index 0000000000..e933727ce5 --- /dev/null +++ b/serving/numaflow-models/src/models/inter_step_buffer_service_list.rs @@ -0,0 +1,41 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator 
https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +/// InterStepBufferServiceList : InterStepBufferServiceList is the list of InterStepBufferService resources + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct InterStepBufferServiceList { + /// APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + #[serde(rename = "apiVersion", skip_serializing_if = "Option::is_none")] + pub api_version: Option<String>, + #[serde(rename = "items")] + pub items: Vec<crate::models::InterStepBufferService>, + /// Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + #[serde(rename = "kind", skip_serializing_if = "Option::is_none")] + pub kind: Option<String>, + #[serde(rename = "metadata")] + pub metadata: k8s_openapi::apimachinery::pkg::apis::meta::v1::ListMeta, +} + +impl InterStepBufferServiceList { + /// InterStepBufferServiceList is the list of InterStepBufferService resources + pub fn new(items: Vec<crate::models::InterStepBufferService>, metadata: k8s_openapi::apimachinery::pkg::apis::meta::v1::ListMeta) -> InterStepBufferServiceList { + InterStepBufferServiceList { + api_version: None, + items, + kind: None, + metadata, + } + } +} + + diff --git a/serving/numaflow-models/src/models/inter_step_buffer_service_spec.rs b/serving/numaflow-models/src/models/inter_step_buffer_service_spec.rs new file mode 100644 index 0000000000..79be40fa45 --- /dev/null +++ b/serving/numaflow-models/src/models/inter_step_buffer_service_spec.rs @@ -0,0 +1,31 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct InterStepBufferServiceSpec { + #[serde(rename = "jetstream", skip_serializing_if = "Option::is_none")] + pub jetstream: Option<Box<crate::models::JetStreamBufferService>>, + #[serde(rename = "redis", skip_serializing_if = "Option::is_none")] + pub redis: Option<Box<crate::models::RedisBufferService>>, +} + +impl InterStepBufferServiceSpec { + pub fn new() -> InterStepBufferServiceSpec { + InterStepBufferServiceSpec { + jetstream: None, + redis: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/inter_step_buffer_service_status.rs b/serving/numaflow-models/src/models/inter_step_buffer_service_status.rs new file mode 100644 index 0000000000..6cf0a96745 --- /dev/null +++ b/serving/numaflow-models/src/models/inter_step_buffer_service_status.rs @@ -0,0 +1,44 @@ +/* + * 
Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct InterStepBufferServiceStatus { + /// Conditions are the latest available observations of a resource's current state. + #[serde(rename = "conditions", skip_serializing_if = "Option::is_none")] + pub conditions: Option<Vec<k8s_openapi::apimachinery::pkg::apis::meta::v1::Condition>>, + #[serde(rename = "config", skip_serializing_if = "Option::is_none")] + pub config: Option<Box<crate::models::BufferServiceConfig>>, + #[serde(rename = "message", skip_serializing_if = "Option::is_none")] + pub message: Option<String>, + #[serde(rename = "observedGeneration", skip_serializing_if = "Option::is_none")] + pub observed_generation: Option<i64>, + #[serde(rename = "phase", skip_serializing_if = "Option::is_none")] + pub phase: Option<String>, + #[serde(rename = "type", skip_serializing_if = "Option::is_none")] + pub r#type: Option<String>, +} + +impl InterStepBufferServiceStatus { + pub fn new() -> InterStepBufferServiceStatus { + InterStepBufferServiceStatus { + conditions: None, + config: None, + message: None, + observed_generation: None, + phase: None, + r#type: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/jet_stream_buffer_service.rs b/serving/numaflow-models/src/models/jet_stream_buffer_service.rs new file mode 100644 index 0000000000..9441f6d156 --- /dev/null +++ b/serving/numaflow-models/src/models/jet_stream_buffer_service.rs @@ -0,0 +1,113 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct JetStreamBufferService { + #[serde(rename = "affinity", skip_serializing_if = "Option::is_none")] + pub 
affinity: Option, + /// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + #[serde(rename = "automountServiceAccountToken", skip_serializing_if = "Option::is_none")] + pub automount_service_account_token: Option, + /// Optional configuration for the streams, consumers and buckets to be created in this JetStream service, if specified, it will be merged with the default configuration in numaflow-controller-config. It accepts a YAML format configuration, it may include 4 sections, \"stream\", \"consumer\", \"otBucket\" and \"procBucket\". Available fields under \"stream\" include \"retention\" (e.g. interest, limits, workerQueue), \"maxMsgs\", \"maxAge\" (e.g. 72h), \"replicas\" (1, 3, 5), \"duplicates\" (e.g. 5m). Available fields under \"consumer\" include \"ackWait\" (e.g. 60s) Available fields under \"otBucket\" include \"maxValueSize\", \"history\", \"ttl\" (e.g. 72h), \"maxBytes\", \"replicas\" (1, 3, 5). Available fields under \"procBucket\" include \"maxValueSize\", \"history\", \"ttl\" (e.g. 72h), \"maxBytes\", \"replicas\" (1, 3, 5). + #[serde(rename = "bufferConfig", skip_serializing_if = "Option::is_none")] + pub buffer_config: Option, + #[serde(rename = "containerTemplate", skip_serializing_if = "Option::is_none")] + pub container_template: Option>, + #[serde(rename = "dnsConfig", skip_serializing_if = "Option::is_none")] + pub dns_config: Option, + /// Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. 
+ #[serde(rename = "dnsPolicy", skip_serializing_if = "Option::is_none")] + pub dns_policy: Option, + /// Whether encrypt the data at rest, defaults to false Enabling encryption might impact the performance, see https://docs.nats.io/running-a-nats-service/nats_admin/jetstream_admin/encryption_at_rest for the detail Toggling the value will impact encrypting/decrypting existing messages. + #[serde(rename = "encryption", skip_serializing_if = "Option::is_none")] + pub encryption: Option, + /// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + #[serde(rename = "imagePullSecrets", skip_serializing_if = "Option::is_none")] + pub image_pull_secrets: Option>, + #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] + pub metadata: Option>, + #[serde(rename = "metricsContainerTemplate", skip_serializing_if = "Option::is_none")] + pub metrics_container_template: Option>, + /// NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + #[serde(rename = "nodeSelector", skip_serializing_if = "Option::is_none")] + pub node_selector: Option<::std::collections::HashMap>, + #[serde(rename = "persistence", skip_serializing_if = "Option::is_none")] + pub persistence: Option>, + /// The priority value. Various system components use this field to find the priority of the Redis pod. When Priority Admission Controller is enabled, it prevents users from setting this field. 
The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + #[serde(rename = "priority", skip_serializing_if = "Option::is_none")] + pub priority: Option, + /// If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + #[serde(rename = "priorityClassName", skip_serializing_if = "Option::is_none")] + pub priority_class_name: Option, + #[serde(rename = "reloaderContainerTemplate", skip_serializing_if = "Option::is_none")] + pub reloader_container_template: Option>, + /// JetStream StatefulSet size + #[serde(rename = "replicas", skip_serializing_if = "Option::is_none")] + pub replicas: Option, + /// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. 
More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + #[serde(rename = "runtimeClassName", skip_serializing_if = "Option::is_none")] + pub runtime_class_name: Option, + #[serde(rename = "securityContext", skip_serializing_if = "Option::is_none")] + pub security_context: Option, + /// ServiceAccountName applied to the pod + #[serde(rename = "serviceAccountName", skip_serializing_if = "Option::is_none")] + pub service_account_name: Option, + /// Nats/JetStream configuration, if not specified, global settings in numaflow-controller-config will be used. See https://docs.nats.io/running-a-nats-service/configuration#limits and https://docs.nats.io/running-a-nats-service/configuration#jetstream. For limits, only \"max_payload\" is supported for configuration, defaults to 1048576 (1MB), not recommended to use values over 8388608 (8MB) but max_payload can be set up to 67108864 (64MB). For jetstream, only \"max_memory_store\" and \"max_file_store\" are supported for configuration, do not set \"store_dir\" as it has been hardcoded. + #[serde(rename = "settings", skip_serializing_if = "Option::is_none")] + pub settings: Option, + /// Optional arguments to start nats-server. For example, \"-D\" to enable debugging output, \"-DV\" to enable debugging and tracing. Check https://docs.nats.io/ for all the available arguments. + #[serde(rename = "startArgs", skip_serializing_if = "Option::is_none")] + pub start_args: Option>, + /// Whether enable TLS, defaults to false Enabling TLS might impact the performance + #[serde(rename = "tls", skip_serializing_if = "Option::is_none")] + pub tls: Option, + /// If specified, the pod's tolerations. 
+ #[serde(rename = "tolerations", skip_serializing_if = "Option::is_none")] + pub tolerations: Option>, + /// JetStream version, such as \"2.7.1\" + #[serde(rename = "version", skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +impl JetStreamBufferService { + pub fn new() -> JetStreamBufferService { + JetStreamBufferService { + affinity: None, + automount_service_account_token: None, + buffer_config: None, + container_template: None, + dns_config: None, + dns_policy: None, + encryption: None, + image_pull_secrets: None, + metadata: None, + metrics_container_template: None, + node_selector: None, + persistence: None, + priority: None, + priority_class_name: None, + reloader_container_template: None, + replicas: None, + runtime_class_name: None, + security_context: None, + service_account_name: None, + settings: None, + start_args: None, + tls: None, + tolerations: None, + version: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/jet_stream_config.rs b/serving/numaflow-models/src/models/jet_stream_config.rs new file mode 100644 index 0000000000..bc4c648c1a --- /dev/null +++ b/serving/numaflow-models/src/models/jet_stream_config.rs @@ -0,0 +1,39 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct JetStreamConfig { + #[serde(rename = "auth", skip_serializing_if = "Option::is_none")] + pub auth: Option>, + #[serde(rename = "streamConfig", skip_serializing_if = "Option::is_none")] + pub stream_config: Option, + /// TLS enabled or not + #[serde(rename = "tlsEnabled", skip_serializing_if = "Option::is_none")] + pub tls_enabled: Option, + /// JetStream (NATS) URL + #[serde(rename = "url", skip_serializing_if = "Option::is_none")] + pub url: Option, +} + +impl 
JetStreamConfig { + pub fn new() -> JetStreamConfig { + JetStreamConfig { + auth: None, + stream_config: None, + tls_enabled: None, + url: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/jet_stream_source.rs b/serving/numaflow-models/src/models/jet_stream_source.rs new file mode 100644 index 0000000000..6da52b583c --- /dev/null +++ b/serving/numaflow-models/src/models/jet_stream_source.rs @@ -0,0 +1,39 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct JetStreamSource { + #[serde(rename = "auth", skip_serializing_if = "Option::is_none")] + pub auth: Option>, + /// Stream represents the name of the stream. + #[serde(rename = "stream")] + pub stream: String, + #[serde(rename = "tls", skip_serializing_if = "Option::is_none")] + pub tls: Option>, + /// URL to connect to NATS cluster, multiple urls could be separated by comma. 
+ #[serde(rename = "url")] + pub url: String, +} + +impl JetStreamSource { + pub fn new(stream: String, url: String) -> JetStreamSource { + JetStreamSource { + auth: None, + stream, + tls: None, + url, + } + } +} + + diff --git a/serving/numaflow-models/src/models/job_template.rs b/serving/numaflow-models/src/models/job_template.rs new file mode 100644 index 0000000000..dcae7d906a --- /dev/null +++ b/serving/numaflow-models/src/models/job_template.rs @@ -0,0 +1,84 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct JobTemplate { + #[serde(rename = "affinity", skip_serializing_if = "Option::is_none")] + pub affinity: Option, + /// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + #[serde(rename = "automountServiceAccountToken", skip_serializing_if = "Option::is_none")] + pub automount_service_account_token: Option, + /// Specifies the number of retries before marking this job failed. More info: https://kubernetes.io/docs/concepts/workloads/controllers/job/#pod-backoff-failure-policy Numaflow defaults to 20 + #[serde(rename = "backoffLimit", skip_serializing_if = "Option::is_none")] + pub backoff_limit: Option, + #[serde(rename = "containerTemplate", skip_serializing_if = "Option::is_none")] + pub container_template: Option>, + #[serde(rename = "dnsConfig", skip_serializing_if = "Option::is_none")] + pub dns_config: Option, + /// Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. 
To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + #[serde(rename = "dnsPolicy", skip_serializing_if = "Option::is_none")] + pub dns_policy: Option, + /// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + #[serde(rename = "imagePullSecrets", skip_serializing_if = "Option::is_none")] + pub image_pull_secrets: Option>, + #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] + pub metadata: Option>, + /// NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + #[serde(rename = "nodeSelector", skip_serializing_if = "Option::is_none")] + pub node_selector: Option<::std::collections::HashMap>, + /// The priority value. Various system components use this field to find the priority of the Redis pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + #[serde(rename = "priority", skip_serializing_if = "Option::is_none")] + pub priority: Option, + /// If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. 
Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + #[serde(rename = "priorityClassName", skip_serializing_if = "Option::is_none")] + pub priority_class_name: Option, + /// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + #[serde(rename = "runtimeClassName", skip_serializing_if = "Option::is_none")] + pub runtime_class_name: Option, + #[serde(rename = "securityContext", skip_serializing_if = "Option::is_none")] + pub security_context: Option, + /// ServiceAccountName applied to the pod + #[serde(rename = "serviceAccountName", skip_serializing_if = "Option::is_none")] + pub service_account_name: Option, + /// If specified, the pod's tolerations. + #[serde(rename = "tolerations", skip_serializing_if = "Option::is_none")] + pub tolerations: Option>, + /// ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. 
Numaflow defaults to 30 + #[serde(rename = "ttlSecondsAfterFinished", skip_serializing_if = "Option::is_none")] + pub ttl_seconds_after_finished: Option, +} + +impl JobTemplate { + pub fn new() -> JobTemplate { + JobTemplate { + affinity: None, + automount_service_account_token: None, + backoff_limit: None, + container_template: None, + dns_config: None, + dns_policy: None, + image_pull_secrets: None, + metadata: None, + node_selector: None, + priority: None, + priority_class_name: None, + runtime_class_name: None, + security_context: None, + service_account_name: None, + tolerations: None, + ttl_seconds_after_finished: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/kafka_sink.rs b/serving/numaflow-models/src/models/kafka_sink.rs new file mode 100644 index 0000000000..3b2c730fa5 --- /dev/null +++ b/serving/numaflow-models/src/models/kafka_sink.rs @@ -0,0 +1,40 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct KafkaSink { + #[serde(rename = "brokers", skip_serializing_if = "Option::is_none")] + pub brokers: Option>, + #[serde(rename = "config", skip_serializing_if = "Option::is_none")] + pub config: Option, + #[serde(rename = "sasl", skip_serializing_if = "Option::is_none")] + pub sasl: Option>, + #[serde(rename = "tls", skip_serializing_if = "Option::is_none")] + pub tls: Option>, + #[serde(rename = "topic")] + pub topic: String, +} + +impl KafkaSink { + pub fn new(topic: String) -> KafkaSink { + KafkaSink { + brokers: None, + config: None, + sasl: None, + tls: None, + topic, + } + } +} + + diff --git a/serving/numaflow-models/src/models/kafka_source.rs b/serving/numaflow-models/src/models/kafka_source.rs new file mode 100644 index 0000000000..078eeee69a --- /dev/null +++ 
b/serving/numaflow-models/src/models/kafka_source.rs @@ -0,0 +1,43 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct KafkaSource { + #[serde(rename = "brokers", skip_serializing_if = "Option::is_none")] + pub brokers: Option>, + #[serde(rename = "config", skip_serializing_if = "Option::is_none")] + pub config: Option, + #[serde(rename = "consumerGroup", skip_serializing_if = "Option::is_none")] + pub consumer_group: Option, + #[serde(rename = "sasl", skip_serializing_if = "Option::is_none")] + pub sasl: Option>, + #[serde(rename = "tls", skip_serializing_if = "Option::is_none")] + pub tls: Option>, + #[serde(rename = "topic")] + pub topic: String, +} + +impl KafkaSource { + pub fn new(topic: String) -> KafkaSource { + KafkaSource { + brokers: None, + config: None, + consumer_group: None, + sasl: None, + tls: None, + topic, + } + } +} + + diff --git a/serving/numaflow-models/src/models/lifecycle.rs b/serving/numaflow-models/src/models/lifecycle.rs new file mode 100644 index 0000000000..3c1a478171 --- /dev/null +++ b/serving/numaflow-models/src/models/lifecycle.rs @@ -0,0 +1,37 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Lifecycle { + /// DeleteGracePeriodSeconds used to delete pipeline gracefully + #[serde(rename = "deleteGracePeriodSeconds", skip_serializing_if = "Option::is_none")] + pub delete_grace_period_seconds: Option, + /// DesiredPhase used to bring the pipeline from current phase to desired phase + 
#[serde(rename = "desiredPhase", skip_serializing_if = "Option::is_none")] + pub desired_phase: Option, + /// PauseGracePeriodSeconds used to pause pipeline gracefully + #[serde(rename = "pauseGracePeriodSeconds", skip_serializing_if = "Option::is_none")] + pub pause_grace_period_seconds: Option, +} + +impl Lifecycle { + pub fn new() -> Lifecycle { + Lifecycle { + delete_grace_period_seconds: None, + desired_phase: None, + pause_grace_period_seconds: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/log.rs b/serving/numaflow-models/src/models/log.rs new file mode 100644 index 0000000000..141bb39e94 --- /dev/null +++ b/serving/numaflow-models/src/models/log.rs @@ -0,0 +1,25 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Log { +} + +impl Log { + pub fn new() -> Log { + Log { + } + } +} + + diff --git a/serving/numaflow-models/src/models/metadata.rs b/serving/numaflow-models/src/models/metadata.rs new file mode 100644 index 0000000000..13c95358f6 --- /dev/null +++ b/serving/numaflow-models/src/models/metadata.rs @@ -0,0 +1,31 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Metadata { + #[serde(rename = "annotations", skip_serializing_if = "Option::is_none")] + pub annotations: Option<::std::collections::HashMap>, + #[serde(rename = "labels", skip_serializing_if = "Option::is_none")] + pub labels: Option<::std::collections::HashMap>, +} + +impl Metadata { + pub fn new() -> Metadata { + Metadata { + 
annotations: None, + labels: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/mod.rs b/serving/numaflow-models/src/models/mod.rs new file mode 100644 index 0000000000..5725454d44 --- /dev/null +++ b/serving/numaflow-models/src/models/mod.rs @@ -0,0 +1,172 @@ +pub mod abstract_pod_template; +pub use self::abstract_pod_template::AbstractPodTemplate; +pub mod abstract_sink; +pub use self::abstract_sink::AbstractSink; +pub mod abstract_vertex; +pub use self::abstract_vertex::AbstractVertex; +pub mod authorization; +pub use self::authorization::Authorization; +pub mod basic_auth; +pub use self::basic_auth::BasicAuth; +pub mod blackhole; +pub use self::blackhole::Blackhole; +pub mod buffer_service_config; +pub use self::buffer_service_config::BufferServiceConfig; +pub mod combined_edge; +pub use self::combined_edge::CombinedEdge; +pub mod container; +pub use self::container::Container; +pub mod container_builder; +pub use self::container_builder::ContainerBuilder; +pub mod container_template; +pub use self::container_template::ContainerTemplate; +pub mod daemon_template; +pub use self::daemon_template::DaemonTemplate; +pub mod edge; +pub use self::edge::Edge; +pub mod fixed_window; +pub use self::fixed_window::FixedWindow; +pub mod forward_conditions; +pub use self::forward_conditions::ForwardConditions; +pub mod function; +pub use self::function::Function; +pub mod generator_source; +pub use self::generator_source::GeneratorSource; +pub mod get_container_req; +pub use self::get_container_req::GetContainerReq; +pub mod get_daemon_deployment_req; +pub use self::get_daemon_deployment_req::GetDaemonDeploymentReq; +pub mod get_jet_stream_service_spec_req; +pub use self::get_jet_stream_service_spec_req::GetJetStreamServiceSpecReq; +pub mod get_jet_stream_stateful_set_spec_req; +pub use self::get_jet_stream_stateful_set_spec_req::GetJetStreamStatefulSetSpecReq; +pub mod get_redis_service_spec_req; +pub use 
self::get_redis_service_spec_req::GetRedisServiceSpecReq; +pub mod get_redis_stateful_set_spec_req; +pub use self::get_redis_stateful_set_spec_req::GetRedisStatefulSetSpecReq; +pub mod get_side_input_deployment_req; +pub use self::get_side_input_deployment_req::GetSideInputDeploymentReq; +pub mod get_vertex_pod_spec_req; +pub use self::get_vertex_pod_spec_req::GetVertexPodSpecReq; +pub mod group_by; +pub use self::group_by::GroupBy; +pub mod gssapi; +pub use self::gssapi::Gssapi; +pub mod http_source; +pub use self::http_source::HttpSource; +pub mod idle_source; +pub use self::idle_source::IdleSource; +pub mod inter_step_buffer_service; +pub use self::inter_step_buffer_service::InterStepBufferService; +pub mod inter_step_buffer_service_list; +pub use self::inter_step_buffer_service_list::InterStepBufferServiceList; +pub mod inter_step_buffer_service_spec; +pub use self::inter_step_buffer_service_spec::InterStepBufferServiceSpec; +pub mod inter_step_buffer_service_status; +pub use self::inter_step_buffer_service_status::InterStepBufferServiceStatus; +pub mod jet_stream_buffer_service; +pub use self::jet_stream_buffer_service::JetStreamBufferService; +pub mod jet_stream_config; +pub use self::jet_stream_config::JetStreamConfig; +pub mod jet_stream_source; +pub use self::jet_stream_source::JetStreamSource; +pub mod job_template; +pub use self::job_template::JobTemplate; +pub mod kafka_sink; +pub use self::kafka_sink::KafkaSink; +pub mod kafka_source; +pub use self::kafka_source::KafkaSource; +pub mod lifecycle; +pub use self::lifecycle::Lifecycle; +pub mod log; +pub use self::log::Log; +pub mod metadata; +pub use self::metadata::Metadata; +pub mod native_redis; +pub use self::native_redis::NativeRedis; +pub mod nats_auth; +pub use self::nats_auth::NatsAuth; +pub mod nats_source; +pub use self::nats_source::NatsSource; +pub mod no_store; +pub use self::no_store::NoStore; +pub mod pbq_storage; +pub use self::pbq_storage::PbqStorage; +pub mod persistence_strategy; +pub 
use self::persistence_strategy::PersistenceStrategy; +pub mod pipeline; +pub use self::pipeline::Pipeline; +pub mod pipeline_limits; +pub use self::pipeline_limits::PipelineLimits; +pub mod pipeline_list; +pub use self::pipeline_list::PipelineList; +pub mod pipeline_spec; +pub use self::pipeline_spec::PipelineSpec; +pub mod pipeline_status; +pub use self::pipeline_status::PipelineStatus; +pub mod redis_buffer_service; +pub use self::redis_buffer_service::RedisBufferService; +pub mod redis_config; +pub use self::redis_config::RedisConfig; +pub mod redis_settings; +pub use self::redis_settings::RedisSettings; +pub mod sasl; +pub use self::sasl::Sasl; +pub mod sasl_plain; +pub use self::sasl_plain::SaslPlain; +pub mod scale; +pub use self::scale::Scale; +pub mod serving_source; +pub use self::serving_source::ServingSource; +pub mod serving_store; +pub use self::serving_store::ServingStore; +pub mod session_window; +pub use self::session_window::SessionWindow; +pub mod side_input; +pub use self::side_input::SideInput; +pub mod side_input_trigger; +pub use self::side_input_trigger::SideInputTrigger; +pub mod side_inputs_manager_template; +pub use self::side_inputs_manager_template::SideInputsManagerTemplate; +pub mod sink; +pub use self::sink::Sink; +pub mod sliding_window; +pub use self::sliding_window::SlidingWindow; +pub mod source; +pub use self::source::Source; +pub mod status; +pub use self::status::Status; +pub mod tag_conditions; +pub use self::tag_conditions::TagConditions; +pub mod templates; +pub use self::templates::Templates; +pub mod tls; +pub use self::tls::Tls; +pub mod transformer; +pub use self::transformer::Transformer; +pub mod ud_sink; +pub use self::ud_sink::UdSink; +pub mod ud_source; +pub use self::ud_source::UdSource; +pub mod ud_transformer; +pub use self::ud_transformer::UdTransformer; +pub mod udf; +pub use self::udf::Udf; +pub mod vertex; +pub use self::vertex::Vertex; +pub mod vertex_instance; +pub use self::vertex_instance::VertexInstance; 
+pub mod vertex_limits; +pub use self::vertex_limits::VertexLimits; +pub mod vertex_list; +pub use self::vertex_list::VertexList; +pub mod vertex_spec; +pub use self::vertex_spec::VertexSpec; +pub mod vertex_status; +pub use self::vertex_status::VertexStatus; +pub mod vertex_template; +pub use self::vertex_template::VertexTemplate; +pub mod watermark; +pub use self::watermark::Watermark; +pub mod window; +pub use self::window::Window; diff --git a/serving/numaflow-models/src/models/native_redis.rs b/serving/numaflow-models/src/models/native_redis.rs new file mode 100644 index 0000000000..c538f13b2a --- /dev/null +++ b/serving/numaflow-models/src/models/native_redis.rs @@ -0,0 +1,99 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct NativeRedis { + #[serde(rename = "affinity", skip_serializing_if = "Option::is_none")] + pub affinity: Option, + /// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + #[serde(rename = "automountServiceAccountToken", skip_serializing_if = "Option::is_none")] + pub automount_service_account_token: Option, + #[serde(rename = "dnsConfig", skip_serializing_if = "Option::is_none")] + pub dns_config: Option, + /// Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. 
+ #[serde(rename = "dnsPolicy", skip_serializing_if = "Option::is_none")] + pub dns_policy: Option, + /// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + #[serde(rename = "imagePullSecrets", skip_serializing_if = "Option::is_none")] + pub image_pull_secrets: Option>, + #[serde(rename = "initContainerTemplate", skip_serializing_if = "Option::is_none")] + pub init_container_template: Option>, + #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] + pub metadata: Option>, + #[serde(rename = "metricsContainerTemplate", skip_serializing_if = "Option::is_none")] + pub metrics_container_template: Option>, + /// NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + #[serde(rename = "nodeSelector", skip_serializing_if = "Option::is_none")] + pub node_selector: Option<::std::collections::HashMap>, + #[serde(rename = "persistence", skip_serializing_if = "Option::is_none")] + pub persistence: Option>, + /// The priority value. Various system components use this field to find the priority of the Redis pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. 
More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + #[serde(rename = "priority", skip_serializing_if = "Option::is_none")] + pub priority: Option, + /// If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + #[serde(rename = "priorityClassName", skip_serializing_if = "Option::is_none")] + pub priority_class_name: Option, + #[serde(rename = "redisContainerTemplate", skip_serializing_if = "Option::is_none")] + pub redis_container_template: Option>, + /// Redis StatefulSet size + #[serde(rename = "replicas", skip_serializing_if = "Option::is_none")] + pub replicas: Option, + /// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. 
More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + #[serde(rename = "runtimeClassName", skip_serializing_if = "Option::is_none")] + pub runtime_class_name: Option, + #[serde(rename = "securityContext", skip_serializing_if = "Option::is_none")] + pub security_context: Option, + #[serde(rename = "sentinelContainerTemplate", skip_serializing_if = "Option::is_none")] + pub sentinel_container_template: Option>, + /// ServiceAccountName applied to the pod + #[serde(rename = "serviceAccountName", skip_serializing_if = "Option::is_none")] + pub service_account_name: Option, + #[serde(rename = "settings", skip_serializing_if = "Option::is_none")] + pub settings: Option>, + /// If specified, the pod's tolerations. + #[serde(rename = "tolerations", skip_serializing_if = "Option::is_none")] + pub tolerations: Option>, + /// Redis version, such as \"6.0.16\" + #[serde(rename = "version", skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +impl NativeRedis { + pub fn new() -> NativeRedis { + NativeRedis { + affinity: None, + automount_service_account_token: None, + dns_config: None, + dns_policy: None, + image_pull_secrets: None, + init_container_template: None, + metadata: None, + metrics_container_template: None, + node_selector: None, + persistence: None, + priority: None, + priority_class_name: None, + redis_container_template: None, + replicas: None, + runtime_class_name: None, + security_context: None, + sentinel_container_template: None, + service_account_name: None, + settings: None, + tolerations: None, + version: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/nats_auth.rs b/serving/numaflow-models/src/models/nats_auth.rs new file mode 100644 index 0000000000..8c5c977109 --- /dev/null +++ b/serving/numaflow-models/src/models/nats_auth.rs @@ -0,0 +1,36 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version 
of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +/// NatsAuth : NatsAuth defines how to authenticate the nats access + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct NatsAuth { + #[serde(rename = "basic", skip_serializing_if = "Option::is_none")] + pub basic: Option>, + #[serde(rename = "nkey", skip_serializing_if = "Option::is_none")] + pub nkey: Option, + #[serde(rename = "token", skip_serializing_if = "Option::is_none")] + pub token: Option, +} + +impl NatsAuth { + /// NatsAuth defines how to authenticate the nats access + pub fn new() -> NatsAuth { + NatsAuth { + basic: None, + nkey: None, + token: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/nats_source.rs b/serving/numaflow-models/src/models/nats_source.rs new file mode 100644 index 0000000000..d60f175c23 --- /dev/null +++ b/serving/numaflow-models/src/models/nats_source.rs @@ -0,0 +1,43 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct NatsSource { + #[serde(rename = "auth", skip_serializing_if = "Option::is_none")] + pub auth: Option>, + /// Queue is used for queue subscription. + #[serde(rename = "queue")] + pub queue: String, + /// Subject holds the name of the subject onto which messages are published. + #[serde(rename = "subject")] + pub subject: String, + #[serde(rename = "tls", skip_serializing_if = "Option::is_none")] + pub tls: Option>, + /// URL to connect to NATS cluster, multiple urls could be separated by comma. 
+ #[serde(rename = "url")] + pub url: String, +} + +impl NatsSource { + pub fn new(queue: String, subject: String, url: String) -> NatsSource { + NatsSource { + auth: None, + queue, + subject, + tls: None, + url, + } + } +} + + diff --git a/serving/numaflow-models/src/models/no_store.rs b/serving/numaflow-models/src/models/no_store.rs new file mode 100644 index 0000000000..187e93d96b --- /dev/null +++ b/serving/numaflow-models/src/models/no_store.rs @@ -0,0 +1,27 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +/// NoStore : NoStore means there will be no persistence storage and there will be data loss during pod restarts. Use this option only if you do not care about correctness (e.g., approx statistics pipeline like sampling rate, etc.). + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct NoStore { +} + +impl NoStore { + /// NoStore means there will be no persistence storage and there will be data loss during pod restarts. Use this option only if you do not care about correctness (e.g., approx statistics pipeline like sampling rate, etc.). + pub fn new() -> NoStore { + NoStore { + } + } +} + + diff --git a/serving/numaflow-models/src/models/pbq_storage.rs b/serving/numaflow-models/src/models/pbq_storage.rs new file mode 100644 index 0000000000..ea9cb9514e --- /dev/null +++ b/serving/numaflow-models/src/models/pbq_storage.rs @@ -0,0 +1,36 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +/// PbqStorage : PBQStorage defines the persistence configuration for a vertex. 
+ + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct PbqStorage { + #[serde(rename = "emptyDir", skip_serializing_if = "Option::is_none")] + pub empty_dir: Option, + #[serde(rename = "no_store", skip_serializing_if = "Option::is_none")] + pub no_store: Option>, + #[serde(rename = "persistentVolumeClaim", skip_serializing_if = "Option::is_none")] + pub persistent_volume_claim: Option>, +} + +impl PbqStorage { + /// PBQStorage defines the persistence configuration for a vertex. + pub fn new() -> PbqStorage { + PbqStorage { + empty_dir: None, + no_store: None, + persistent_volume_claim: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/persistence_strategy.rs b/serving/numaflow-models/src/models/persistence_strategy.rs new file mode 100644 index 0000000000..49351a146d --- /dev/null +++ b/serving/numaflow-models/src/models/persistence_strategy.rs @@ -0,0 +1,38 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +/// PersistenceStrategy : PersistenceStrategy defines the strategy of persistence + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct PersistenceStrategy { + /// Available access modes such as ReadWriteOnce, ReadWriteMany https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes + #[serde(rename = "accessMode", skip_serializing_if = "Option::is_none")] + pub access_mode: Option, + /// Name of the StorageClass required by the claim. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + #[serde(rename = "storageClassName", skip_serializing_if = "Option::is_none")] + pub storage_class_name: Option, + #[serde(rename = "volumeSize", skip_serializing_if = "Option::is_none")] + pub volume_size: Option, +} + +impl PersistenceStrategy { + /// PersistenceStrategy defines the strategy of persistence + pub fn new() -> PersistenceStrategy { + PersistenceStrategy { + access_mode: None, + storage_class_name: None, + volume_size: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/pipeline.rs b/serving/numaflow-models/src/models/pipeline.rs new file mode 100644 index 0000000000..e6d7054b36 --- /dev/null +++ b/serving/numaflow-models/src/models/pipeline.rs @@ -0,0 +1,42 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Pipeline { + /// APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + #[serde(rename = "apiVersion", skip_serializing_if = "Option::is_none")] + pub api_version: Option, + /// Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + #[serde(rename = "kind", skip_serializing_if = "Option::is_none")] + pub kind: Option, + #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] + pub metadata: Option, + #[serde(rename = "spec")] + pub spec: Box, + #[serde(rename = "status", skip_serializing_if = "Option::is_none")] + pub status: Option>, +} + +impl Pipeline { + pub fn new(spec: crate::models::PipelineSpec) -> Pipeline { + Pipeline { + api_version: None, + kind: None, + metadata: None, + spec: Box::new(spec), + status: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/pipeline_limits.rs b/serving/numaflow-models/src/models/pipeline_limits.rs new file mode 100644 index 0000000000..3e3a2e58f1 --- /dev/null +++ b/serving/numaflow-models/src/models/pipeline_limits.rs @@ -0,0 +1,40 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct PipelineLimits { + /// BufferMaxLength is used to define the max length of a buffer. Only applies to UDF and Source vertices as only they do buffer write. It can be overridden by the settings in vertex limits. + #[serde(rename = "bufferMaxLength", skip_serializing_if = "Option::is_none")] + pub buffer_max_length: Option, + /// BufferUsageLimit is used to define the percentage of the buffer usage limit, a valid value should be less than 100, for example, 85. Only applies to UDF and Source vertices as only they do buffer write. It will be overridden by the settings in vertex limits. 
+ #[serde(rename = "bufferUsageLimit", skip_serializing_if = "Option::is_none")] + pub buffer_usage_limit: Option, + /// Read batch size for all the vertices in the pipeline, can be overridden by the vertex's limit settings. + #[serde(rename = "readBatchSize", skip_serializing_if = "Option::is_none")] + pub read_batch_size: Option, + #[serde(rename = "readTimeout", skip_serializing_if = "Option::is_none")] + pub read_timeout: Option, +} + +impl PipelineLimits { + pub fn new() -> PipelineLimits { + PipelineLimits { + buffer_max_length: None, + buffer_usage_limit: None, + read_batch_size: None, + read_timeout: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/pipeline_list.rs b/serving/numaflow-models/src/models/pipeline_list.rs new file mode 100644 index 0000000000..a159018e5e --- /dev/null +++ b/serving/numaflow-models/src/models/pipeline_list.rs @@ -0,0 +1,39 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct PipelineList { + /// APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + #[serde(rename = "apiVersion", skip_serializing_if = "Option::is_none")] + pub api_version: Option, + #[serde(rename = "items")] + pub items: Vec, + /// Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + #[serde(rename = "kind", skip_serializing_if = "Option::is_none")] + pub kind: Option, + #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] + pub metadata: Option, +} + +impl PipelineList { + pub fn new(items: Vec) -> PipelineList { + PipelineList { + api_version: None, + items, + kind: None, + metadata: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/pipeline_spec.rs b/serving/numaflow-models/src/models/pipeline_spec.rs new file mode 100644 index 0000000000..66c29acf08 --- /dev/null +++ b/serving/numaflow-models/src/models/pipeline_spec.rs @@ -0,0 +1,51 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct PipelineSpec { + /// Edges define the relationships between vertices + #[serde(rename = "edges", skip_serializing_if = "Option::is_none")] + pub edges: Option>, + #[serde(rename = "interStepBufferServiceName", skip_serializing_if = "Option::is_none")] + pub inter_step_buffer_service_name: Option, + #[serde(rename = "lifecycle", skip_serializing_if = "Option::is_none")] + pub lifecycle: Option>, + #[serde(rename = "limits", skip_serializing_if = "Option::is_none")] + pub limits: Option>, + /// SideInputs defines the Side Inputs of a pipeline. 
+ #[serde(rename = "sideInputs", skip_serializing_if = "Option::is_none")] + pub side_inputs: Option>, + #[serde(rename = "templates", skip_serializing_if = "Option::is_none")] + pub templates: Option>, + #[serde(rename = "vertices", skip_serializing_if = "Option::is_none")] + pub vertices: Option>, + #[serde(rename = "watermark", skip_serializing_if = "Option::is_none")] + pub watermark: Option>, +} + +impl PipelineSpec { + pub fn new() -> PipelineSpec { + PipelineSpec { + edges: None, + inter_step_buffer_service_name: None, + lifecycle: None, + limits: None, + side_inputs: None, + templates: None, + vertices: None, + watermark: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/pipeline_status.rs b/serving/numaflow-models/src/models/pipeline_status.rs new file mode 100644 index 0000000000..64e8934fab --- /dev/null +++ b/serving/numaflow-models/src/models/pipeline_status.rs @@ -0,0 +1,59 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct PipelineStatus { + /// Conditions are the latest available observations of a resource's current state. 
+ #[serde(rename = "conditions", skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + #[serde(rename = "lastUpdated", skip_serializing_if = "Option::is_none")] + pub last_updated: Option, + #[serde(rename = "mapUDFCount", skip_serializing_if = "Option::is_none")] + pub map_udf_count: Option, + #[serde(rename = "message", skip_serializing_if = "Option::is_none")] + pub message: Option, + #[serde(rename = "observedGeneration", skip_serializing_if = "Option::is_none")] + pub observed_generation: Option, + #[serde(rename = "phase", skip_serializing_if = "Option::is_none")] + pub phase: Option, + #[serde(rename = "reduceUDFCount", skip_serializing_if = "Option::is_none")] + pub reduce_udf_count: Option, + #[serde(rename = "sinkCount", skip_serializing_if = "Option::is_none")] + pub sink_count: Option, + #[serde(rename = "sourceCount", skip_serializing_if = "Option::is_none")] + pub source_count: Option, + #[serde(rename = "udfCount", skip_serializing_if = "Option::is_none")] + pub udf_count: Option, + #[serde(rename = "vertexCount", skip_serializing_if = "Option::is_none")] + pub vertex_count: Option, +} + +impl PipelineStatus { + pub fn new() -> PipelineStatus { + PipelineStatus { + conditions: None, + last_updated: None, + map_udf_count: None, + message: None, + observed_generation: None, + phase: None, + reduce_udf_count: None, + sink_count: None, + source_count: None, + udf_count: None, + vertex_count: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/redis_buffer_service.rs b/serving/numaflow-models/src/models/redis_buffer_service.rs new file mode 100644 index 0000000000..10994749ff --- /dev/null +++ b/serving/numaflow-models/src/models/redis_buffer_service.rs @@ -0,0 +1,31 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + 
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct RedisBufferService { + #[serde(rename = "external", skip_serializing_if = "Option::is_none")] + pub external: Option>, + #[serde(rename = "native", skip_serializing_if = "Option::is_none")] + pub native: Option>, +} + +impl RedisBufferService { + pub fn new() -> RedisBufferService { + RedisBufferService { + external: None, + native: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/redis_config.rs b/serving/numaflow-models/src/models/redis_config.rs new file mode 100644 index 0000000000..9c6c5180e6 --- /dev/null +++ b/serving/numaflow-models/src/models/redis_config.rs @@ -0,0 +1,47 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct RedisConfig { + /// Only required when Sentinel is used + #[serde(rename = "masterName", skip_serializing_if = "Option::is_none")] + pub master_name: Option, + #[serde(rename = "password", skip_serializing_if = "Option::is_none")] + pub password: Option, + #[serde(rename = "sentinelPassword", skip_serializing_if = "Option::is_none")] + pub sentinel_password: Option, + /// Sentinel URL, will be ignored if Redis URL is provided + #[serde(rename = "sentinelUrl", skip_serializing_if = "Option::is_none")] + pub sentinel_url: Option, + /// Redis URL + #[serde(rename = "url", skip_serializing_if = "Option::is_none")] + pub url: Option, + /// Redis user + #[serde(rename = "user", skip_serializing_if = "Option::is_none")] + pub user: Option, +} + +impl RedisConfig { + pub fn new() -> RedisConfig { + RedisConfig { + master_name: None, + password: None, + sentinel_password: None, + sentinel_url: None, + url: None, + user: None, + } + } +} + + diff --git 
a/serving/numaflow-models/src/models/redis_settings.rs b/serving/numaflow-models/src/models/redis_settings.rs new file mode 100644 index 0000000000..3b8e813a28 --- /dev/null +++ b/serving/numaflow-models/src/models/redis_settings.rs @@ -0,0 +1,41 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct RedisSettings { + /// Special settings for Redis master node, will override the global settings from controller config + #[serde(rename = "master", skip_serializing_if = "Option::is_none")] + pub master: Option, + /// Redis settings shared by both master and slaves, will override the global settings from controller config + #[serde(rename = "redis", skip_serializing_if = "Option::is_none")] + pub redis: Option, + /// Special settings for Redis replica nodes, will override the global settings from controller config + #[serde(rename = "replica", skip_serializing_if = "Option::is_none")] + pub replica: Option, + /// Sentinel settings, will override the global settings from controller config + #[serde(rename = "sentinel", skip_serializing_if = "Option::is_none")] + pub sentinel: Option, +} + +impl RedisSettings { + pub fn new() -> RedisSettings { + RedisSettings { + master: None, + redis: None, + replica: None, + sentinel: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/sasl.rs b/serving/numaflow-models/src/models/sasl.rs new file mode 100644 index 0000000000..163eb8a1b0 --- /dev/null +++ b/serving/numaflow-models/src/models/sasl.rs @@ -0,0 +1,41 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ 
+ + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Sasl { + #[serde(rename = "gssapi", skip_serializing_if = "Option::is_none")] + pub gssapi: Option>, + /// SASL mechanism to use + #[serde(rename = "mechanism")] + pub mechanism: String, + #[serde(rename = "plain", skip_serializing_if = "Option::is_none")] + pub plain: Option>, + #[serde(rename = "scramsha256", skip_serializing_if = "Option::is_none")] + pub scramsha256: Option>, + #[serde(rename = "scramsha512", skip_serializing_if = "Option::is_none")] + pub scramsha512: Option>, +} + +impl Sasl { + pub fn new(mechanism: String) -> Sasl { + Sasl { + gssapi: None, + mechanism, + plain: None, + scramsha256: None, + scramsha512: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/sasl_plain.rs b/serving/numaflow-models/src/models/sasl_plain.rs new file mode 100644 index 0000000000..085581f975 --- /dev/null +++ b/serving/numaflow-models/src/models/sasl_plain.rs @@ -0,0 +1,34 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct SaslPlain { + #[serde(rename = "handshake")] + pub handshake: bool, + #[serde(rename = "passwordSecret", skip_serializing_if = "Option::is_none")] + pub password_secret: Option, + #[serde(rename = "userSecret")] + pub user_secret: k8s_openapi::api::core::v1::SecretKeySelector, +} + +impl SaslPlain { + pub fn new(handshake: bool, user_secret: k8s_openapi::api::core::v1::SecretKeySelector) -> SaslPlain { + SaslPlain { + handshake, + password_secret: None, + user_secret, + } + } +} + + diff --git a/serving/numaflow-models/src/models/scale.rs b/serving/numaflow-models/src/models/scale.rs new file mode 100644 index 0000000000..7510676ce1 --- /dev/null +++ 
b/serving/numaflow-models/src/models/scale.rs @@ -0,0 +1,71 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +/// Scale : Scale defines the parameters for autoscaling. + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Scale { + /// Deprecated: Use scaleUpCooldownSeconds and scaleDownCooldownSeconds instead. Cooldown seconds after a scaling operation before another one. + #[serde(rename = "cooldownSeconds", skip_serializing_if = "Option::is_none")] + pub cooldown_seconds: Option, + /// Whether to disable autoscaling. Set to \"true\" when using Kubernetes HPA or any other 3rd party autoscaling strategies. + #[serde(rename = "disabled", skip_serializing_if = "Option::is_none")] + pub disabled: Option, + /// Lookback seconds to calculate the average pending messages and processing rate. + #[serde(rename = "lookbackSeconds", skip_serializing_if = "Option::is_none")] + pub lookback_seconds: Option, + /// Maximum replicas. + #[serde(rename = "max", skip_serializing_if = "Option::is_none")] + pub max: Option, + /// Minimum replicas. + #[serde(rename = "min", skip_serializing_if = "Option::is_none")] + pub min: Option, + /// ReplicasPerScale defines maximum replicas can be scaled up or down at once. The is use to prevent too aggressive scaling operations + #[serde(rename = "replicasPerScale", skip_serializing_if = "Option::is_none")] + pub replicas_per_scale: Option, + /// ScaleDownCooldownSeconds defines the cooldown seconds after a scaling operation, before a follow-up scaling down. It defaults to the CooldownSeconds if not set. 
+ #[serde(rename = "scaleDownCooldownSeconds", skip_serializing_if = "Option::is_none")] + pub scale_down_cooldown_seconds: Option, + /// ScaleUpCooldownSeconds defines the cooldown seconds after a scaling operation, before a follow-up scaling up. It defaults to the CooldownSeconds if not set. + #[serde(rename = "scaleUpCooldownSeconds", skip_serializing_if = "Option::is_none")] + pub scale_up_cooldown_seconds: Option, + /// TargetBufferAvailability is used to define the target percentage of the buffer availability. A valid and meaningful value should be less than the BufferUsageLimit defined in the Edge spec (or Pipeline spec), for example, 50. It only applies to UDF and Sink vertices because only they have buffers to read. + #[serde(rename = "targetBufferAvailability", skip_serializing_if = "Option::is_none")] + pub target_buffer_availability: Option, + /// TargetProcessingSeconds is used to tune the aggressiveness of autoscaling for source vertices, it measures how fast you want the vertex to process all the pending messages. Typically increasing the value, which leads to lower processing rate, thus less replicas. It's only effective for source vertices. + #[serde(rename = "targetProcessingSeconds", skip_serializing_if = "Option::is_none")] + pub target_processing_seconds: Option, + /// After scaling down the source vertex to 0, sleep how many seconds before scaling the source vertex back up to peek. + #[serde(rename = "zeroReplicaSleepSeconds", skip_serializing_if = "Option::is_none")] + pub zero_replica_sleep_seconds: Option, +} + +impl Scale { + /// Scale defines the parameters for autoscaling. 
+ pub fn new() -> Scale { + Scale { + cooldown_seconds: None, + disabled: None, + lookback_seconds: None, + max: None, + min: None, + replicas_per_scale: None, + scale_down_cooldown_seconds: None, + scale_up_cooldown_seconds: None, + target_buffer_availability: None, + target_processing_seconds: None, + zero_replica_sleep_seconds: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/serving_source.rs b/serving/numaflow-models/src/models/serving_source.rs new file mode 100644 index 0000000000..e1e6c444cf --- /dev/null +++ b/serving/numaflow-models/src/models/serving_source.rs @@ -0,0 +1,41 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +/// ServingSource : ServingSource is the HTTP endpoint for Numaflow. + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct ServingSource { + #[serde(rename = "auth", skip_serializing_if = "Option::is_none")] + pub auth: Option>, + /// The header key from which the message id will be extracted + #[serde(rename = "msgIDHeaderKey")] + pub msg_id_header_key: String, + /// Whether to create a ClusterIP Service + #[serde(rename = "service", skip_serializing_if = "Option::is_none")] + pub service: Option, + #[serde(rename = "store")] + pub store: Box, +} + +impl ServingSource { + /// ServingSource is the HTTP endpoint for Numaflow. 
+ pub fn new(msg_id_header_key: String, store: crate::models::ServingStore) -> ServingSource { + ServingSource { + auth: None, + msg_id_header_key, + service: None, + store: Box::new(store), + } + } +} + + diff --git a/serving/numaflow-models/src/models/serving_store.rs b/serving/numaflow-models/src/models/serving_store.rs new file mode 100644 index 0000000000..44998b5e73 --- /dev/null +++ b/serving/numaflow-models/src/models/serving_store.rs @@ -0,0 +1,34 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +/// ServingStore : ServingStore to track and store data and metadata for tracking and serving. + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct ServingStore { + #[serde(rename = "ttl", skip_serializing_if = "Option::is_none")] + pub ttl: Option, + /// URL of the persistent store to write the callbacks + #[serde(rename = "url")] + pub url: String, +} + +impl ServingStore { + /// ServingStore to track and store data and metadata for tracking and serving. 
+ pub fn new(url: String) -> ServingStore { + ServingStore { + ttl: None, + url, + } + } +} + + diff --git a/serving/numaflow-models/src/models/session_window.rs b/serving/numaflow-models/src/models/session_window.rs new file mode 100644 index 0000000000..67f1f440bc --- /dev/null +++ b/serving/numaflow-models/src/models/session_window.rs @@ -0,0 +1,30 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +/// SessionWindow : SessionWindow describes a session window + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct SessionWindow { + #[serde(rename = "timeout", skip_serializing_if = "Option::is_none")] + pub timeout: Option, +} + +impl SessionWindow { + /// SessionWindow describes a session window + pub fn new() -> SessionWindow { + SessionWindow { + timeout: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/side_input.rs b/serving/numaflow-models/src/models/side_input.rs new file mode 100644 index 0000000000..9cb2040749 --- /dev/null +++ b/serving/numaflow-models/src/models/side_input.rs @@ -0,0 +1,39 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +/// SideInput : SideInput defines information of a Side Input + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct SideInput { + #[serde(rename = "container")] + pub container: Box, + #[serde(rename = "name")] + pub name: String, + #[serde(rename = "trigger")] + pub trigger: Box, + #[serde(rename = "volumes", skip_serializing_if = "Option::is_none")] + pub volumes: Option>, +} + +impl SideInput { + /// SideInput defines information of a Side Input + pub fn 
new(container: crate::models::Container, name: String, trigger: crate::models::SideInputTrigger) -> SideInput { + SideInput { + container: Box::new(container), + name, + trigger: Box::new(trigger), + volumes: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/side_input_trigger.rs b/serving/numaflow-models/src/models/side_input_trigger.rs new file mode 100644 index 0000000000..f92cff7cd9 --- /dev/null +++ b/serving/numaflow-models/src/models/side_input_trigger.rs @@ -0,0 +1,32 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct SideInputTrigger { + /// The schedule to trigger the retrievement of the side input data. It supports cron format, for example, \"0 30 * * * *\". Or interval based format, such as \"@hourly\", \"@every 1h30m\", etc. 
+ #[serde(rename = "schedule")] + pub schedule: String, + #[serde(rename = "timezone", skip_serializing_if = "Option::is_none")] + pub timezone: Option, +} + +impl SideInputTrigger { + pub fn new(schedule: String) -> SideInputTrigger { + SideInputTrigger { + schedule, + timezone: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/side_inputs_manager_template.rs b/serving/numaflow-models/src/models/side_inputs_manager_template.rs new file mode 100644 index 0000000000..d9a8e6a4fc --- /dev/null +++ b/serving/numaflow-models/src/models/side_inputs_manager_template.rs @@ -0,0 +1,79 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct SideInputsManagerTemplate { + #[serde(rename = "affinity", skip_serializing_if = "Option::is_none")] + pub affinity: Option, + /// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + #[serde(rename = "automountServiceAccountToken", skip_serializing_if = "Option::is_none")] + pub automount_service_account_token: Option, + #[serde(rename = "containerTemplate", skip_serializing_if = "Option::is_none")] + pub container_template: Option>, + #[serde(rename = "dnsConfig", skip_serializing_if = "Option::is_none")] + pub dns_config: Option, + /// Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. 
+ #[serde(rename = "dnsPolicy", skip_serializing_if = "Option::is_none")] + pub dns_policy: Option, + /// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + #[serde(rename = "imagePullSecrets", skip_serializing_if = "Option::is_none")] + pub image_pull_secrets: Option>, + #[serde(rename = "initContainerTemplate", skip_serializing_if = "Option::is_none")] + pub init_container_template: Option>, + #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] + pub metadata: Option>, + /// NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + #[serde(rename = "nodeSelector", skip_serializing_if = "Option::is_none")] + pub node_selector: Option<::std::collections::HashMap>, + /// The priority value. Various system components use this field to find the priority of the Redis pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + #[serde(rename = "priority", skip_serializing_if = "Option::is_none")] + pub priority: Option, + /// If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. 
Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + #[serde(rename = "priorityClassName", skip_serializing_if = "Option::is_none")] + pub priority_class_name: Option, + /// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + #[serde(rename = "runtimeClassName", skip_serializing_if = "Option::is_none")] + pub runtime_class_name: Option, + #[serde(rename = "securityContext", skip_serializing_if = "Option::is_none")] + pub security_context: Option, + /// ServiceAccountName applied to the pod + #[serde(rename = "serviceAccountName", skip_serializing_if = "Option::is_none")] + pub service_account_name: Option, + /// If specified, the pod's tolerations. 
+ #[serde(rename = "tolerations", skip_serializing_if = "Option::is_none")] + pub tolerations: Option>, +} + +impl SideInputsManagerTemplate { + pub fn new() -> SideInputsManagerTemplate { + SideInputsManagerTemplate { + affinity: None, + automount_service_account_token: None, + container_template: None, + dns_config: None, + dns_policy: None, + image_pull_secrets: None, + init_container_template: None, + metadata: None, + node_selector: None, + priority: None, + priority_class_name: None, + runtime_class_name: None, + security_context: None, + service_account_name: None, + tolerations: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/sink.rs b/serving/numaflow-models/src/models/sink.rs new file mode 100644 index 0000000000..ebed76c690 --- /dev/null +++ b/serving/numaflow-models/src/models/sink.rs @@ -0,0 +1,40 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Sink { + #[serde(rename = "blackhole", skip_serializing_if = "Option::is_none")] + pub blackhole: Option>, + #[serde(rename = "fallback", skip_serializing_if = "Option::is_none")] + pub fallback: Option>, + #[serde(rename = "kafka", skip_serializing_if = "Option::is_none")] + pub kafka: Option>, + #[serde(rename = "log", skip_serializing_if = "Option::is_none")] + pub log: Option>, + #[serde(rename = "udsink", skip_serializing_if = "Option::is_none")] + pub udsink: Option>, +} + +impl Sink { + pub fn new() -> Sink { + Sink { + blackhole: None, + fallback: None, + kafka: None, + log: None, + udsink: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/sliding_window.rs b/serving/numaflow-models/src/models/sliding_window.rs new file mode 100644 index 0000000000..10530f04b3 --- /dev/null +++ 
b/serving/numaflow-models/src/models/sliding_window.rs @@ -0,0 +1,37 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +/// SlidingWindow : SlidingWindow describes a sliding window + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct SlidingWindow { + #[serde(rename = "length", skip_serializing_if = "Option::is_none")] + pub length: Option, + #[serde(rename = "slide", skip_serializing_if = "Option::is_none")] + pub slide: Option, + /// Streaming should be set to true if the reduce udf is streaming. + #[serde(rename = "streaming", skip_serializing_if = "Option::is_none")] + pub streaming: Option, +} + +impl SlidingWindow { + /// SlidingWindow describes a sliding window + pub fn new() -> SlidingWindow { + SlidingWindow { + length: None, + slide: None, + streaming: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/source.rs b/serving/numaflow-models/src/models/source.rs new file mode 100644 index 0000000000..fadfe1a402 --- /dev/null +++ b/serving/numaflow-models/src/models/source.rs @@ -0,0 +1,49 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Source { + #[serde(rename = "generator", skip_serializing_if = "Option::is_none")] + pub generator: Option>, + #[serde(rename = "http", skip_serializing_if = "Option::is_none")] + pub http: Option>, + #[serde(rename = "jetstream", skip_serializing_if = "Option::is_none")] + pub jetstream: Option>, + #[serde(rename = "kafka", skip_serializing_if = "Option::is_none")] + pub kafka: Option>, + #[serde(rename = "nats", 
skip_serializing_if = "Option::is_none")] + pub nats: Option>, + #[serde(rename = "serving", skip_serializing_if = "Option::is_none")] + pub serving: Option>, + #[serde(rename = "transformer", skip_serializing_if = "Option::is_none")] + pub transformer: Option>, + #[serde(rename = "udsource", skip_serializing_if = "Option::is_none")] + pub udsource: Option>, +} + +impl Source { + pub fn new() -> Source { + Source { + generator: None, + http: None, + jetstream: None, + kafka: None, + nats: None, + serving: None, + transformer: None, + udsource: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/status.rs b/serving/numaflow-models/src/models/status.rs new file mode 100644 index 0000000000..accf9e49d2 --- /dev/null +++ b/serving/numaflow-models/src/models/status.rs @@ -0,0 +1,31 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +/// Status : Status is a common structure which can be used for Status field. + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Status { + /// Conditions are the latest available observations of a resource's current state. + #[serde(rename = "conditions", skip_serializing_if = "Option::is_none")] + pub conditions: Option>, +} + +impl Status { + /// Status is a common structure which can be used for Status field. 
+ pub fn new() -> Status { + Status { + conditions: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/tag_conditions.rs b/serving/numaflow-models/src/models/tag_conditions.rs new file mode 100644 index 0000000000..42b6b94e0a --- /dev/null +++ b/serving/numaflow-models/src/models/tag_conditions.rs @@ -0,0 +1,33 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct TagConditions { + /// Operator specifies the type of operation that should be used for conditional forwarding value could be \"and\", \"or\", \"not\" + #[serde(rename = "operator", skip_serializing_if = "Option::is_none")] + pub operator: Option, + /// Values tag values for conditional forwarding + #[serde(rename = "values")] + pub values: Vec, +} + +impl TagConditions { + pub fn new(values: Vec) -> TagConditions { + TagConditions { + operator: None, + values, + } + } +} + + diff --git a/serving/numaflow-models/src/models/templates.rs b/serving/numaflow-models/src/models/templates.rs new file mode 100644 index 0000000000..382019d5b5 --- /dev/null +++ b/serving/numaflow-models/src/models/templates.rs @@ -0,0 +1,37 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Templates { + #[serde(rename = "daemon", skip_serializing_if = "Option::is_none")] + pub daemon: Option>, + #[serde(rename = "job", skip_serializing_if = "Option::is_none")] + pub job: Option>, + #[serde(rename = "sideInputsManager", skip_serializing_if = "Option::is_none")] + pub 
side_inputs_manager: Option>, + #[serde(rename = "vertex", skip_serializing_if = "Option::is_none")] + pub vertex: Option>, +} + +impl Templates { + pub fn new() -> Templates { + Templates { + daemon: None, + job: None, + side_inputs_manager: None, + vertex: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/tls.rs b/serving/numaflow-models/src/models/tls.rs new file mode 100644 index 0000000000..b140b68531 --- /dev/null +++ b/serving/numaflow-models/src/models/tls.rs @@ -0,0 +1,37 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Tls { + #[serde(rename = "caCertSecret", skip_serializing_if = "Option::is_none")] + pub ca_cert_secret: Option, + #[serde(rename = "certSecret", skip_serializing_if = "Option::is_none")] + pub cert_secret: Option, + #[serde(rename = "insecureSkipVerify", skip_serializing_if = "Option::is_none")] + pub insecure_skip_verify: Option, + #[serde(rename = "keySecret", skip_serializing_if = "Option::is_none")] + pub key_secret: Option, +} + +impl Tls { + pub fn new() -> Tls { + Tls { + ca_cert_secret: None, + cert_secret: None, + insecure_skip_verify: None, + key_secret: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/transformer.rs b/serving/numaflow-models/src/models/transformer.rs new file mode 100644 index 0000000000..5540a6b6f9 --- /dev/null +++ b/serving/numaflow-models/src/models/transformer.rs @@ -0,0 +1,34 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct 
Transformer { + #[serde(rename = "args", skip_serializing_if = "Option::is_none")] + pub args: Option>, + #[serde(rename = "kwargs", skip_serializing_if = "Option::is_none")] + pub kwargs: Option<::std::collections::HashMap>, + #[serde(rename = "name")] + pub name: String, +} + +impl Transformer { + pub fn new(name: String) -> Transformer { + Transformer { + args: None, + kwargs: None, + name, + } + } +} + + diff --git a/serving/numaflow-models/src/models/ud_sink.rs b/serving/numaflow-models/src/models/ud_sink.rs new file mode 100644 index 0000000000..f39a053de5 --- /dev/null +++ b/serving/numaflow-models/src/models/ud_sink.rs @@ -0,0 +1,28 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct UdSink { + #[serde(rename = "container")] + pub container: Box, +} + +impl UdSink { + pub fn new(container: crate::models::Container) -> UdSink { + UdSink { + container: Box::new(container), + } + } +} + + diff --git a/serving/numaflow-models/src/models/ud_source.rs b/serving/numaflow-models/src/models/ud_source.rs new file mode 100644 index 0000000000..2242908feb --- /dev/null +++ b/serving/numaflow-models/src/models/ud_source.rs @@ -0,0 +1,28 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct UdSource { + #[serde(rename = "container")] + pub container: Box, +} + +impl UdSource { + pub fn new(container: crate::models::Container) -> UdSource { + UdSource { + container: Box::new(container), + } + } +} + + diff --git 
a/serving/numaflow-models/src/models/ud_transformer.rs b/serving/numaflow-models/src/models/ud_transformer.rs new file mode 100644 index 0000000000..d397305f2b --- /dev/null +++ b/serving/numaflow-models/src/models/ud_transformer.rs @@ -0,0 +1,31 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct UdTransformer { + #[serde(rename = "builtin", skip_serializing_if = "Option::is_none")] + pub builtin: Option>, + #[serde(rename = "container", skip_serializing_if = "Option::is_none")] + pub container: Option>, +} + +impl UdTransformer { + pub fn new() -> UdTransformer { + UdTransformer { + builtin: None, + container: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/udf.rs b/serving/numaflow-models/src/models/udf.rs new file mode 100644 index 0000000000..cfeae12666 --- /dev/null +++ b/serving/numaflow-models/src/models/udf.rs @@ -0,0 +1,34 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Udf { + #[serde(rename = "builtin", skip_serializing_if = "Option::is_none")] + pub builtin: Option>, + #[serde(rename = "container", skip_serializing_if = "Option::is_none")] + pub container: Option>, + #[serde(rename = "groupBy", skip_serializing_if = "Option::is_none")] + pub group_by: Option>, +} + +impl Udf { + pub fn new() -> Udf { + Udf { + builtin: None, + container: None, + group_by: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/vertex.rs b/serving/numaflow-models/src/models/vertex.rs new file mode 
100644 index 0000000000..d23f3023c5 --- /dev/null +++ b/serving/numaflow-models/src/models/vertex.rs @@ -0,0 +1,42 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Vertex { + /// APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + #[serde(rename = "apiVersion", skip_serializing_if = "Option::is_none")] + pub api_version: Option, + /// Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + #[serde(rename = "kind", skip_serializing_if = "Option::is_none")] + pub kind: Option, + #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] + pub metadata: Option, + #[serde(rename = "spec")] + pub spec: Box, + #[serde(rename = "status", skip_serializing_if = "Option::is_none")] + pub status: Option>, +} + +impl Vertex { + pub fn new(spec: crate::models::VertexSpec) -> Vertex { + Vertex { + api_version: None, + kind: None, + metadata: None, + spec: Box::new(spec), + status: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/vertex_instance.rs b/serving/numaflow-models/src/models/vertex_instance.rs new file mode 100644 index 0000000000..6c3298c345 --- /dev/null +++ b/serving/numaflow-models/src/models/vertex_instance.rs @@ -0,0 +1,36 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +/// VertexInstance : VertexInstance is a wrapper of a vertex instance, which contains the vertex spec and the instance information such as hostname and replica index. + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct VertexInstance { + #[serde(rename = "hostname", skip_serializing_if = "Option::is_none")] + pub hostname: Option, + #[serde(rename = "replica", skip_serializing_if = "Option::is_none")] + pub replica: Option, + #[serde(rename = "vertex", skip_serializing_if = "Option::is_none")] + pub vertex: Option>, +} + +impl VertexInstance { + /// VertexInstance is a wrapper of a vertex instance, which contains the vertex spec and the instance information such as hostname and replica index. 
+ pub fn new() -> VertexInstance { + VertexInstance { + hostname: None, + replica: None, + vertex: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/vertex_limits.rs b/serving/numaflow-models/src/models/vertex_limits.rs new file mode 100644 index 0000000000..713c0faa41 --- /dev/null +++ b/serving/numaflow-models/src/models/vertex_limits.rs @@ -0,0 +1,40 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct VertexLimits { + /// BufferMaxLength is used to define the max length of a buffer. It overrides the settings from pipeline limits. + #[serde(rename = "bufferMaxLength", skip_serializing_if = "Option::is_none")] + pub buffer_max_length: Option, + /// BufferUsageLimit is used to define the percentage of the buffer usage limit, a valid value should be less than 100, for example, 85. It overrides the settings from pipeline limits. + #[serde(rename = "bufferUsageLimit", skip_serializing_if = "Option::is_none")] + pub buffer_usage_limit: Option, + /// Read batch size from the source or buffer. It overrides the settings from pipeline limits. 
+ #[serde(rename = "readBatchSize", skip_serializing_if = "Option::is_none")] + pub read_batch_size: Option, + #[serde(rename = "readTimeout", skip_serializing_if = "Option::is_none")] + pub read_timeout: Option, +} + +impl VertexLimits { + pub fn new() -> VertexLimits { + VertexLimits { + buffer_max_length: None, + buffer_usage_limit: None, + read_batch_size: None, + read_timeout: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/vertex_list.rs b/serving/numaflow-models/src/models/vertex_list.rs new file mode 100644 index 0000000000..a350a3b983 --- /dev/null +++ b/serving/numaflow-models/src/models/vertex_list.rs @@ -0,0 +1,39 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct VertexList { + /// APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + #[serde(rename = "apiVersion", skip_serializing_if = "Option::is_none")] + pub api_version: Option, + #[serde(rename = "items")] + pub items: Vec, + /// Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + #[serde(rename = "kind", skip_serializing_if = "Option::is_none")] + pub kind: Option, + #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] + pub metadata: Option, +} + +impl VertexList { + pub fn new(items: Vec) -> VertexList { + VertexList { + api_version: None, + items, + kind: None, + metadata: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/vertex_spec.rs b/serving/numaflow-models/src/models/vertex_spec.rs new file mode 100644 index 0000000000..f96eb218ed --- /dev/null +++ b/serving/numaflow-models/src/models/vertex_spec.rs @@ -0,0 +1,137 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct VertexSpec { + #[serde(rename = "affinity", skip_serializing_if = "Option::is_none")] + pub affinity: Option, + /// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + #[serde(rename = "automountServiceAccountToken", skip_serializing_if = "Option::is_none")] + pub automount_service_account_token: Option, + #[serde(rename = "containerTemplate", skip_serializing_if = "Option::is_none")] + pub container_template: Option>, + #[serde(rename = "dnsConfig", skip_serializing_if = "Option::is_none")] + pub dns_config: Option, + /// Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. 
+ #[serde(rename = "dnsPolicy", skip_serializing_if = "Option::is_none")] + pub dns_policy: Option, + #[serde(rename = "fromEdges", skip_serializing_if = "Option::is_none")] + pub from_edges: Option>, + /// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + #[serde(rename = "imagePullSecrets", skip_serializing_if = "Option::is_none")] + pub image_pull_secrets: Option>, + #[serde(rename = "initContainerTemplate", skip_serializing_if = "Option::is_none")] + pub init_container_template: Option>, + /// List of customized init containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + #[serde(rename = "initContainers", skip_serializing_if = "Option::is_none")] + pub init_containers: Option>, + #[serde(rename = "interStepBufferServiceName", skip_serializing_if = "Option::is_none")] + pub inter_step_buffer_service_name: Option, + #[serde(rename = "limits", skip_serializing_if = "Option::is_none")] + pub limits: Option>, + #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] + pub metadata: Option>, + #[serde(rename = "name")] + pub name: String, + /// NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + #[serde(rename = "nodeSelector", skip_serializing_if = "Option::is_none")] + pub node_selector: Option<::std::collections::HashMap>, + /// Number of partitions of the vertex owned buffers. It applies to udf and sink vertices only. 
+ #[serde(rename = "partitions", skip_serializing_if = "Option::is_none")] + pub partitions: Option, + #[serde(rename = "pipelineName")] + pub pipeline_name: String, + /// The priority value. Various system components use this field to find the priority of the Redis pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + #[serde(rename = "priority", skip_serializing_if = "Option::is_none")] + pub priority: Option, + /// If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + #[serde(rename = "priorityClassName", skip_serializing_if = "Option::is_none")] + pub priority_class_name: Option, + #[serde(rename = "replicas", skip_serializing_if = "Option::is_none")] + pub replicas: Option, + /// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. 
More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + #[serde(rename = "runtimeClassName", skip_serializing_if = "Option::is_none")] + pub runtime_class_name: Option, + #[serde(rename = "scale", skip_serializing_if = "Option::is_none")] + pub scale: Option>, + #[serde(rename = "securityContext", skip_serializing_if = "Option::is_none")] + pub security_context: Option, + /// ServiceAccountName applied to the pod + #[serde(rename = "serviceAccountName", skip_serializing_if = "Option::is_none")] + pub service_account_name: Option, + /// Names of the side inputs used in this vertex. + #[serde(rename = "sideInputs", skip_serializing_if = "Option::is_none")] + pub side_inputs: Option>, + #[serde(rename = "sideInputsContainerTemplate", skip_serializing_if = "Option::is_none")] + pub side_inputs_container_template: Option>, + /// List of customized sidecar containers belonging to the pod. + #[serde(rename = "sidecars", skip_serializing_if = "Option::is_none")] + pub sidecars: Option>, + #[serde(rename = "sink", skip_serializing_if = "Option::is_none")] + pub sink: Option>, + #[serde(rename = "source", skip_serializing_if = "Option::is_none")] + pub source: Option>, + #[serde(rename = "toEdges", skip_serializing_if = "Option::is_none")] + pub to_edges: Option>, + /// If specified, the pod's tolerations. 
+ #[serde(rename = "tolerations", skip_serializing_if = "Option::is_none")] + pub tolerations: Option>, + #[serde(rename = "udf", skip_serializing_if = "Option::is_none")] + pub udf: Option>, + #[serde(rename = "volumes", skip_serializing_if = "Option::is_none")] + pub volumes: Option>, + #[serde(rename = "watermark", skip_serializing_if = "Option::is_none")] + pub watermark: Option>, +} + +impl VertexSpec { + pub fn new(name: String, pipeline_name: String) -> VertexSpec { + VertexSpec { + affinity: None, + automount_service_account_token: None, + container_template: None, + dns_config: None, + dns_policy: None, + from_edges: None, + image_pull_secrets: None, + init_container_template: None, + init_containers: None, + inter_step_buffer_service_name: None, + limits: None, + metadata: None, + name, + node_selector: None, + partitions: None, + pipeline_name, + priority: None, + priority_class_name: None, + replicas: None, + runtime_class_name: None, + scale: None, + security_context: None, + service_account_name: None, + side_inputs: None, + side_inputs_container_template: None, + sidecars: None, + sink: None, + source: None, + to_edges: None, + tolerations: None, + udf: None, + volumes: None, + watermark: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/vertex_status.rs b/serving/numaflow-models/src/models/vertex_status.rs new file mode 100644 index 0000000000..3e0377a795 --- /dev/null +++ b/serving/numaflow-models/src/models/vertex_status.rs @@ -0,0 +1,50 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct VertexStatus { + /// Conditions are the latest available observations of a resource's current state. 
+ #[serde(rename = "conditions", skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + #[serde(rename = "lastScaledAt", skip_serializing_if = "Option::is_none")] + pub last_scaled_at: Option, + #[serde(rename = "message", skip_serializing_if = "Option::is_none")] + pub message: Option, + #[serde(rename = "observedGeneration", skip_serializing_if = "Option::is_none")] + pub observed_generation: Option, + #[serde(rename = "phase")] + pub phase: String, + #[serde(rename = "reason", skip_serializing_if = "Option::is_none")] + pub reason: Option, + #[serde(rename = "replicas")] + pub replicas: i64, + #[serde(rename = "selector", skip_serializing_if = "Option::is_none")] + pub selector: Option, +} + +impl VertexStatus { + pub fn new(phase: String, replicas: i64) -> VertexStatus { + VertexStatus { + conditions: None, + last_scaled_at: None, + message: None, + observed_generation: None, + phase, + reason: None, + replicas, + selector: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/vertex_template.rs b/serving/numaflow-models/src/models/vertex_template.rs new file mode 100644 index 0000000000..6bd04e9cad --- /dev/null +++ b/serving/numaflow-models/src/models/vertex_template.rs @@ -0,0 +1,79 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct VertexTemplate { + #[serde(rename = "affinity", skip_serializing_if = "Option::is_none")] + pub affinity: Option, + /// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. 
+ #[serde(rename = "automountServiceAccountToken", skip_serializing_if = "Option::is_none")] + pub automount_service_account_token: Option, + #[serde(rename = "containerTemplate", skip_serializing_if = "Option::is_none")] + pub container_template: Option>, + #[serde(rename = "dnsConfig", skip_serializing_if = "Option::is_none")] + pub dns_config: Option, + /// Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + #[serde(rename = "dnsPolicy", skip_serializing_if = "Option::is_none")] + pub dns_policy: Option, + /// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + #[serde(rename = "imagePullSecrets", skip_serializing_if = "Option::is_none")] + pub image_pull_secrets: Option>, + #[serde(rename = "initContainerTemplate", skip_serializing_if = "Option::is_none")] + pub init_container_template: Option>, + #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] + pub metadata: Option>, + /// NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + #[serde(rename = "nodeSelector", skip_serializing_if = "Option::is_none")] + pub node_selector: Option<::std::collections::HashMap>, + /// The priority value. 
Various system components use this field to find the priority of the Redis pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + #[serde(rename = "priority", skip_serializing_if = "Option::is_none")] + pub priority: Option, + /// If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + #[serde(rename = "priorityClassName", skip_serializing_if = "Option::is_none")] + pub priority_class_name: Option, + /// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + #[serde(rename = "runtimeClassName", skip_serializing_if = "Option::is_none")] + pub runtime_class_name: Option, + #[serde(rename = "securityContext", skip_serializing_if = "Option::is_none")] + pub security_context: Option, + /// ServiceAccountName applied to the pod + #[serde(rename = "serviceAccountName", skip_serializing_if = "Option::is_none")] + pub service_account_name: Option, + /// If specified, the pod's tolerations. 
+ #[serde(rename = "tolerations", skip_serializing_if = "Option::is_none")] + pub tolerations: Option>, +} + +impl VertexTemplate { + pub fn new() -> VertexTemplate { + VertexTemplate { + affinity: None, + automount_service_account_token: None, + container_template: None, + dns_config: None, + dns_policy: None, + image_pull_secrets: None, + init_container_template: None, + metadata: None, + node_selector: None, + priority: None, + priority_class_name: None, + runtime_class_name: None, + security_context: None, + service_account_name: None, + tolerations: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/watermark.rs b/serving/numaflow-models/src/models/watermark.rs new file mode 100644 index 0000000000..e32de79335 --- /dev/null +++ b/serving/numaflow-models/src/models/watermark.rs @@ -0,0 +1,35 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Watermark { + /// Disabled toggles the watermark propagation, defaults to false. 
+ #[serde(rename = "disabled", skip_serializing_if = "Option::is_none")] + pub disabled: Option, + #[serde(rename = "idleSource", skip_serializing_if = "Option::is_none")] + pub idle_source: Option>, + #[serde(rename = "maxDelay", skip_serializing_if = "Option::is_none")] + pub max_delay: Option, +} + +impl Watermark { + pub fn new() -> Watermark { + Watermark { + disabled: None, + idle_source: None, + max_delay: None, + } + } +} + + diff --git a/serving/numaflow-models/src/models/window.rs b/serving/numaflow-models/src/models/window.rs new file mode 100644 index 0000000000..3f72f7be34 --- /dev/null +++ b/serving/numaflow-models/src/models/window.rs @@ -0,0 +1,36 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +/// Window : Window describes windowing strategy + + + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Window { + #[serde(rename = "fixed", skip_serializing_if = "Option::is_none")] + pub fixed: Option>, + #[serde(rename = "session", skip_serializing_if = "Option::is_none")] + pub session: Option>, + #[serde(rename = "sliding", skip_serializing_if = "Option::is_none")] + pub sliding: Option>, +} + +impl Window { + /// Window describes windowing strategy + pub fn new() -> Window { + Window { + fixed: None, + session: None, + sliding: None, + } + } +} + + From b65edc1fdf61ffbaafad139d46ad7b950fb8d7ed Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Tue, 6 Aug 2024 07:42:06 -0700 Subject: [PATCH 05/23] chore: add cargo fmt to numaflow-models (#1902) Signed-off-by: Vigith Maurice --- serving/numaflow-models/Makefile | 1 + .../numaflow-models/src/apis/configuration.rs | 6 +--- serving/numaflow-models/src/apis/mod.rs | 17 +++++----- serving/numaflow-models/src/lib.rs | 2 +- .../src/models/abstract_pod_template.rs | 11 +++---- 
.../src/models/abstract_sink.rs | 7 +--- .../src/models/abstract_vertex.rs | 22 +++++++------ .../src/models/authorization.rs | 11 ++----- .../numaflow-models/src/models/basic_auth.rs | 6 +--- .../numaflow-models/src/models/blackhole.rs | 12 ++----- .../src/models/buffer_service_config.rs | 7 +--- .../src/models/combined_edge.rs | 23 ++++++++----- .../numaflow-models/src/models/container.rs | 6 +--- .../src/models/container_builder.rs | 17 +++++----- .../src/models/container_template.rs | 6 +--- .../src/models/daemon_template.rs | 17 +++++----- serving/numaflow-models/src/models/edge.rs | 7 +--- .../src/models/fixed_window.rs | 6 +--- .../src/models/forward_conditions.rs | 7 +--- .../numaflow-models/src/models/function.rs | 7 +--- .../src/models/generator_source.rs | 7 +--- .../src/models/get_container_req.rs | 16 ++++++---- .../src/models/get_daemon_deployment_req.rs | 15 +++++---- .../models/get_jet_stream_service_spec_req.rs | 15 +++++---- .../get_jet_stream_stateful_set_spec_req.rs | 25 +++++++++++---- .../src/models/get_redis_service_spec_req.rs | 13 ++++---- .../models/get_redis_stateful_set_spec_req.rs | 26 +++++++++++---- .../models/get_side_input_deployment_req.rs | 15 +++++---- .../src/models/get_vertex_pod_spec_req.rs | 18 +++++++---- .../numaflow-models/src/models/group_by.rs | 6 +--- serving/numaflow-models/src/models/gssapi.rs | 17 ++++++---- .../numaflow-models/src/models/http_source.rs | 7 +--- .../numaflow-models/src/models/idle_source.rs | 7 +--- .../src/models/inter_step_buffer_service.rs | 7 +--- .../models/inter_step_buffer_service_list.rs | 11 +++---- .../models/inter_step_buffer_service_spec.rs | 7 +--- .../inter_step_buffer_service_status.rs | 7 +--- .../src/models/jet_stream_buffer_service.rs | 22 +++++++------ .../src/models/jet_stream_config.rs | 7 +--- .../src/models/jet_stream_source.rs | 7 +--- .../src/models/job_template.rs | 17 +++++----- .../numaflow-models/src/models/kafka_sink.rs | 7 +--- .../src/models/kafka_source.rs | 7 +--- 
.../numaflow-models/src/models/lifecycle.rs | 17 +++++----- serving/numaflow-models/src/models/log.rs | 13 ++------ .../numaflow-models/src/models/metadata.rs | 7 +--- .../src/models/native_redis.rs | 32 ++++++++++++------- .../numaflow-models/src/models/nats_auth.rs | 6 +--- .../numaflow-models/src/models/nats_source.rs | 7 +--- .../numaflow-models/src/models/no_store.rs | 12 ++----- .../numaflow-models/src/models/pbq_storage.rs | 11 +++---- .../src/models/persistence_strategy.rs | 6 +--- .../numaflow-models/src/models/pipeline.rs | 7 +--- .../src/models/pipeline_limits.rs | 7 +--- .../src/models/pipeline_list.rs | 7 +--- .../src/models/pipeline_spec.rs | 12 +++---- .../src/models/pipeline_status.rs | 7 +--- .../src/models/redis_buffer_service.rs | 7 +--- .../src/models/redis_config.rs | 7 +--- .../src/models/redis_settings.rs | 7 +--- serving/numaflow-models/src/models/sasl.rs | 7 +--- .../numaflow-models/src/models/sasl_plain.rs | 12 +++---- serving/numaflow-models/src/models/scale.rs | 31 ++++++++++++------ .../src/models/serving_source.rs | 6 +--- .../src/models/serving_store.rs | 11 ++----- .../src/models/session_window.rs | 10 ++---- .../numaflow-models/src/models/side_input.rs | 12 +++---- .../src/models/side_input_trigger.rs | 7 +--- .../models/side_inputs_manager_template.rs | 17 +++++----- serving/numaflow-models/src/models/sink.rs | 7 +--- .../src/models/sliding_window.rs | 6 +--- serving/numaflow-models/src/models/source.rs | 7 +--- serving/numaflow-models/src/models/status.rs | 10 ++---- .../src/models/tag_conditions.rs | 7 +--- .../numaflow-models/src/models/templates.rs | 7 +--- serving/numaflow-models/src/models/tls.rs | 7 +--- .../numaflow-models/src/models/transformer.rs | 7 +--- serving/numaflow-models/src/models/ud_sink.rs | 7 +--- .../numaflow-models/src/models/ud_source.rs | 7 +--- .../src/models/ud_transformer.rs | 7 +--- serving/numaflow-models/src/models/udf.rs | 7 +--- serving/numaflow-models/src/models/vertex.rs | 7 +--- 
.../src/models/vertex_instance.rs | 6 +--- .../src/models/vertex_limits.rs | 7 +--- .../numaflow-models/src/models/vertex_list.rs | 7 +--- .../numaflow-models/src/models/vertex_spec.rs | 27 ++++++++++------ .../src/models/vertex_status.rs | 7 +--- .../src/models/vertex_template.rs | 17 +++++----- .../numaflow-models/src/models/watermark.rs | 7 +--- serving/numaflow-models/src/models/window.rs | 6 +--- 90 files changed, 364 insertions(+), 582 deletions(-) diff --git a/serving/numaflow-models/Makefile b/serving/numaflow-models/Makefile index 9e82883f4d..ab2155f3a1 100644 --- a/serving/numaflow-models/Makefile +++ b/serving/numaflow-models/Makefile @@ -61,3 +61,4 @@ generate: sed -e 's/edition = "2018"/edition = "2021"/g' -e 's/authors =.*/authors = \["Numaflow Developers"\]/' -e 's/license =.*/license = "Apache License 2.0"/' Cargo.toml > tmp && mv tmp Cargo.toml cargo add kube cargo add k8s-openapi --features v1_29 + cargo fmt diff --git a/serving/numaflow-models/src/apis/configuration.rs b/serving/numaflow-models/src/apis/configuration.rs index 5c65cc5721..da73e26e0b 100644 --- a/serving/numaflow-models/src/apis/configuration.rs +++ b/serving/numaflow-models/src/apis/configuration.rs @@ -4,12 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - #[derive(Debug, Clone)] pub struct Configuration { pub base_path: String, @@ -30,7 +28,6 @@ pub struct ApiKey { pub key: String, } - impl Configuration { pub fn new() -> Configuration { Configuration::default() @@ -47,7 +44,6 @@ impl Default for Configuration { oauth_access_token: None, bearer_access_token: None, api_key: None, - } } } diff --git a/serving/numaflow-models/src/apis/mod.rs b/serving/numaflow-models/src/apis/mod.rs index dccbc940fb..5fb0cf38c7 100644 --- a/serving/numaflow-models/src/apis/mod.rs +++ 
b/serving/numaflow-models/src/apis/mod.rs @@ -16,7 +16,7 @@ pub enum Error { ResponseError(ResponseContent), } -impl fmt::Display for Error { +impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let (module, e) = match self { Error::Reqwest(e) => ("reqwest", e.to_string()), @@ -28,7 +28,7 @@ impl fmt::Display for Error { } } -impl error::Error for Error { +impl error::Error for Error { fn source(&self) -> Option<&(dyn error::Error + 'static)> { Some(match self { Error::Reqwest(e) => e, @@ -39,19 +39,19 @@ impl error::Error for Error { } } -impl From for Error { +impl From for Error { fn from(e: reqwest::Error) -> Self { Error::Reqwest(e) } } -impl From for Error { +impl From for Error { fn from(e: serde_json::Error) -> Self { Error::Serde(e) } } -impl From for Error { +impl From for Error { fn from(e: std::io::Error) -> Self { Error::Io(e) } @@ -78,8 +78,10 @@ pub fn parse_deep_object(prefix: &str, value: &serde_json::Value) -> Vec<(String value, )); } - }, - serde_json::Value::String(s) => params.push((format!("{}[{}]", prefix, key), s.clone())), + } + serde_json::Value::String(s) => { + params.push((format!("{}[{}]", prefix, key), s.clone())) + } _ => params.push((format!("{}[{}]", prefix, key), value.to_string())), } } @@ -90,5 +92,4 @@ pub fn parse_deep_object(prefix: &str, value: &serde_json::Value) -> Vec<(String unimplemented!("Only objects are supported with style=deepObject") } - pub mod configuration; diff --git a/serving/numaflow-models/src/lib.rs b/serving/numaflow-models/src/lib.rs index c1dd666f79..fc22e4e4b9 100644 --- a/serving/numaflow-models/src/lib.rs +++ b/serving/numaflow-models/src/lib.rs @@ -1,10 +1,10 @@ #[macro_use] extern crate serde_derive; +extern crate reqwest; extern crate serde; extern crate serde_json; extern crate url; -extern crate reqwest; pub mod apis; pub mod models; diff --git a/serving/numaflow-models/src/models/abstract_pod_template.rs 
b/serving/numaflow-models/src/models/abstract_pod_template.rs index fd19a7b425..6de4149ab1 100644 --- a/serving/numaflow-models/src/models/abstract_pod_template.rs +++ b/serving/numaflow-models/src/models/abstract_pod_template.rs @@ -4,20 +4,21 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ /// AbstractPodTemplate : AbstractPodTemplate provides a template for pod customization in vertices, daemon deployments and so on. - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AbstractPodTemplate { #[serde(rename = "affinity", skip_serializing_if = "Option::is_none")] pub affinity: Option, /// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. - #[serde(rename = "automountServiceAccountToken", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "automountServiceAccountToken", + skip_serializing_if = "Option::is_none" + )] pub automount_service_account_token: Option, #[serde(rename = "dnsConfig", skip_serializing_if = "Option::is_none")] pub dns_config: Option, @@ -71,5 +72,3 @@ impl AbstractPodTemplate { } } } - - diff --git a/serving/numaflow-models/src/models/abstract_sink.rs b/serving/numaflow-models/src/models/abstract_sink.rs index 48bf8c8276..ab8f2ba058 100644 --- a/serving/numaflow-models/src/models/abstract_sink.rs +++ b/serving/numaflow-models/src/models/abstract_sink.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AbstractSink { #[serde(rename = "blackhole", skip_serializing_if = "Option::is_none")] @@ -33,5 +30,3 @@ impl AbstractSink { } } } - - 
diff --git a/serving/numaflow-models/src/models/abstract_vertex.rs b/serving/numaflow-models/src/models/abstract_vertex.rs index 1ee864662e..62fc1240ce 100644 --- a/serving/numaflow-models/src/models/abstract_vertex.rs +++ b/serving/numaflow-models/src/models/abstract_vertex.rs @@ -4,19 +4,19 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AbstractVertex { #[serde(rename = "affinity", skip_serializing_if = "Option::is_none")] pub affinity: Option, /// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. - #[serde(rename = "automountServiceAccountToken", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "automountServiceAccountToken", + skip_serializing_if = "Option::is_none" + )] pub automount_service_account_token: Option, #[serde(rename = "containerTemplate", skip_serializing_if = "Option::is_none")] pub container_template: Option>, @@ -28,7 +28,10 @@ pub struct AbstractVertex { /// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. 
More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod #[serde(rename = "imagePullSecrets", skip_serializing_if = "Option::is_none")] pub image_pull_secrets: Option>, - #[serde(rename = "initContainerTemplate", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "initContainerTemplate", + skip_serializing_if = "Option::is_none" + )] pub init_container_template: Option>, /// List of customized init containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ #[serde(rename = "initContainers", skip_serializing_if = "Option::is_none")] @@ -64,7 +67,10 @@ pub struct AbstractVertex { /// Names of the side inputs used in this vertex. #[serde(rename = "sideInputs", skip_serializing_if = "Option::is_none")] pub side_inputs: Option>, - #[serde(rename = "sideInputsContainerTemplate", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "sideInputsContainerTemplate", + skip_serializing_if = "Option::is_none" + )] pub side_inputs_container_template: Option>, /// List of customized sidecar containers belonging to the pod. 
#[serde(rename = "sidecars", skip_serializing_if = "Option::is_none")] @@ -115,5 +121,3 @@ impl AbstractVertex { } } } - - diff --git a/serving/numaflow-models/src/models/authorization.rs b/serving/numaflow-models/src/models/authorization.rs index 6589dc82f4..f1242768d2 100644 --- a/serving/numaflow-models/src/models/authorization.rs +++ b/serving/numaflow-models/src/models/authorization.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Authorization { #[serde(rename = "token", skip_serializing_if = "Option::is_none")] @@ -19,10 +16,6 @@ pub struct Authorization { impl Authorization { pub fn new() -> Authorization { - Authorization { - token: None, - } + Authorization { token: None } } } - - diff --git a/serving/numaflow-models/src/models/basic_auth.rs b/serving/numaflow-models/src/models/basic_auth.rs index e7a4e7d2c7..1b5eea25d8 100644 --- a/serving/numaflow-models/src/models/basic_auth.rs +++ b/serving/numaflow-models/src/models/basic_auth.rs @@ -4,14 +4,12 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ /// BasicAuth : BasicAuth represents the basic authentication approach which contains a user name and a password. 
- - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BasicAuth { #[serde(rename = "password", skip_serializing_if = "Option::is_none")] @@ -29,5 +27,3 @@ impl BasicAuth { } } } - - diff --git a/serving/numaflow-models/src/models/blackhole.rs b/serving/numaflow-models/src/models/blackhole.rs index f84586c042..145871aa16 100644 --- a/serving/numaflow-models/src/models/blackhole.rs +++ b/serving/numaflow-models/src/models/blackhole.rs @@ -4,24 +4,18 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ /// Blackhole : Blackhole is a sink to emulate /dev/null - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct Blackhole { -} +pub struct Blackhole {} impl Blackhole { /// Blackhole is a sink to emulate /dev/null pub fn new() -> Blackhole { - Blackhole { - } + Blackhole {} } } - - diff --git a/serving/numaflow-models/src/models/buffer_service_config.rs b/serving/numaflow-models/src/models/buffer_service_config.rs index bad5eec796..24e621c1ea 100644 --- a/serving/numaflow-models/src/models/buffer_service_config.rs +++ b/serving/numaflow-models/src/models/buffer_service_config.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BufferServiceConfig { #[serde(rename = "jetstream", skip_serializing_if = "Option::is_none")] @@ -27,5 +24,3 @@ impl BufferServiceConfig { } } } - - diff --git a/serving/numaflow-models/src/models/combined_edge.rs b/serving/numaflow-models/src/models/combined_edge.rs index b7fdf05446..94d478727c 100644 --- a/serving/numaflow-models/src/models/combined_edge.rs +++ 
b/serving/numaflow-models/src/models/combined_edge.rs @@ -4,14 +4,12 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ /// CombinedEdge : CombinedEdge is a combination of Edge and some other properties such as vertex type, partitions, limits. It's used to decorate the fromEdges and toEdges of the generated Vertex objects, so that in the vertex pod, it knows the properties of the connected vertices, for example, how many partitioned buffers I should write to, what is the write buffer length, etc. - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CombinedEdge { #[serde(rename = "conditions", skip_serializing_if = "Option::is_none")] @@ -21,7 +19,10 @@ pub struct CombinedEdge { #[serde(rename = "fromVertexLimits", skip_serializing_if = "Option::is_none")] pub from_vertex_limits: Option>, /// The number of partitions of the from vertex, if not provided, the default value is set to \"1\". - #[serde(rename = "fromVertexPartitionCount", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "fromVertexPartitionCount", + skip_serializing_if = "Option::is_none" + )] pub from_vertex_partition_count: Option, /// From vertex type. #[serde(rename = "fromVertexType")] @@ -34,7 +35,10 @@ pub struct CombinedEdge { #[serde(rename = "toVertexLimits", skip_serializing_if = "Option::is_none")] pub to_vertex_limits: Option>, /// The number of partitions of the to vertex, if not provided, the default value is set to \"1\". - #[serde(rename = "toVertexPartitionCount", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "toVertexPartitionCount", + skip_serializing_if = "Option::is_none" + )] pub to_vertex_partition_count: Option, /// To vertex type. 
#[serde(rename = "toVertexType")] @@ -43,7 +47,12 @@ pub struct CombinedEdge { impl CombinedEdge { /// CombinedEdge is a combination of Edge and some other properties such as vertex type, partitions, limits. It's used to decorate the fromEdges and toEdges of the generated Vertex objects, so that in the vertex pod, it knows the properties of the connected vertices, for example, how many partitioned buffers I should write to, what is the write buffer length, etc. - pub fn new(from: String, from_vertex_type: String, to: String, to_vertex_type: String) -> CombinedEdge { + pub fn new( + from: String, + from_vertex_type: String, + to: String, + to_vertex_type: String, + ) -> CombinedEdge { CombinedEdge { conditions: None, from, @@ -58,5 +67,3 @@ impl CombinedEdge { } } } - - diff --git a/serving/numaflow-models/src/models/container.rs b/serving/numaflow-models/src/models/container.rs index 22d8643134..4d3d2730a0 100644 --- a/serving/numaflow-models/src/models/container.rs +++ b/serving/numaflow-models/src/models/container.rs @@ -4,14 +4,12 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ /// Container : Container is used to define the container properties for user-defined functions, sinks, etc. 
- - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Container { #[serde(rename = "args", skip_serializing_if = "Option::is_none")] @@ -50,5 +48,3 @@ impl Container { } } } - - diff --git a/serving/numaflow-models/src/models/container_builder.rs b/serving/numaflow-models/src/models/container_builder.rs index 47a84c6fe7..35898065d5 100644 --- a/serving/numaflow-models/src/models/container_builder.rs +++ b/serving/numaflow-models/src/models/container_builder.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ContainerBuilder { /// Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell @@ -62,10 +59,16 @@ pub struct ContainerBuilder { #[serde(rename = "stdinOnce", skip_serializing_if = "Option::is_none")] pub stdin_once: Option, /// Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. 
Defaults to /dev/termination-log. Cannot be updated. - #[serde(rename = "terminationMessagePath", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "terminationMessagePath", + skip_serializing_if = "Option::is_none" + )] pub termination_message_path: Option, /// Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. - #[serde(rename = "terminationMessagePolicy", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "terminationMessagePolicy", + skip_serializing_if = "Option::is_none" + )] pub termination_message_policy: Option, /// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. 
#[serde(rename = "tty", skip_serializing_if = "Option::is_none")] @@ -111,5 +114,3 @@ impl ContainerBuilder { } } } - - diff --git a/serving/numaflow-models/src/models/container_template.rs b/serving/numaflow-models/src/models/container_template.rs index 737fbf7701..4c85b7a7f6 100644 --- a/serving/numaflow-models/src/models/container_template.rs +++ b/serving/numaflow-models/src/models/container_template.rs @@ -4,14 +4,12 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ /// ContainerTemplate : ContainerTemplate defines customized spec for a container - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ContainerTemplate { #[serde(rename = "env", skip_serializing_if = "Option::is_none")] @@ -38,5 +36,3 @@ impl ContainerTemplate { } } } - - diff --git a/serving/numaflow-models/src/models/daemon_template.rs b/serving/numaflow-models/src/models/daemon_template.rs index 82c8e4ecc5..97290e6fac 100644 --- a/serving/numaflow-models/src/models/daemon_template.rs +++ b/serving/numaflow-models/src/models/daemon_template.rs @@ -4,19 +4,19 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DaemonTemplate { #[serde(rename = "affinity", skip_serializing_if = "Option::is_none")] pub affinity: Option, /// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. 
- #[serde(rename = "automountServiceAccountToken", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "automountServiceAccountToken", + skip_serializing_if = "Option::is_none" + )] pub automount_service_account_token: Option, #[serde(rename = "containerTemplate", skip_serializing_if = "Option::is_none")] pub container_template: Option>, @@ -28,7 +28,10 @@ pub struct DaemonTemplate { /// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod #[serde(rename = "imagePullSecrets", skip_serializing_if = "Option::is_none")] pub image_pull_secrets: Option>, - #[serde(rename = "initContainerTemplate", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "initContainerTemplate", + skip_serializing_if = "Option::is_none" + )] pub init_container_template: Option>, #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] pub metadata: Option>, @@ -79,5 +82,3 @@ impl DaemonTemplate { } } } - - diff --git a/serving/numaflow-models/src/models/edge.rs b/serving/numaflow-models/src/models/edge.rs index 33afab8014..95a3b9742b 100644 --- a/serving/numaflow-models/src/models/edge.rs +++ b/serving/numaflow-models/src/models/edge.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Edge { #[serde(rename = "conditions", skip_serializing_if = "Option::is_none")] @@ -34,5 +31,3 @@ impl Edge { } } } - - diff --git 
a/serving/numaflow-models/src/models/fixed_window.rs b/serving/numaflow-models/src/models/fixed_window.rs index e62eee6d1a..76a3a9dc1c 100644 --- a/serving/numaflow-models/src/models/fixed_window.rs +++ b/serving/numaflow-models/src/models/fixed_window.rs @@ -4,14 +4,12 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ /// FixedWindow : FixedWindow describes a fixed window - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct FixedWindow { #[serde(rename = "length", skip_serializing_if = "Option::is_none")] @@ -30,5 +28,3 @@ impl FixedWindow { } } } - - diff --git a/serving/numaflow-models/src/models/forward_conditions.rs b/serving/numaflow-models/src/models/forward_conditions.rs index c75cb03634..ea99358902 100644 --- a/serving/numaflow-models/src/models/forward_conditions.rs +++ b/serving/numaflow-models/src/models/forward_conditions.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ForwardConditions { #[serde(rename = "tags")] @@ -24,5 +21,3 @@ impl ForwardConditions { } } } - - diff --git a/serving/numaflow-models/src/models/function.rs b/serving/numaflow-models/src/models/function.rs index 147fd444fe..8081c827fe 100644 --- a/serving/numaflow-models/src/models/function.rs +++ b/serving/numaflow-models/src/models/function.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub 
struct Function { #[serde(rename = "args", skip_serializing_if = "Option::is_none")] @@ -30,5 +27,3 @@ impl Function { } } } - - diff --git a/serving/numaflow-models/src/models/generator_source.rs b/serving/numaflow-models/src/models/generator_source.rs index d22bb877da..268652d0e3 100644 --- a/serving/numaflow-models/src/models/generator_source.rs +++ b/serving/numaflow-models/src/models/generator_source.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GeneratorSource { #[serde(rename = "duration", skip_serializing_if = "Option::is_none")] @@ -46,5 +43,3 @@ impl GeneratorSource { } } } - - diff --git a/serving/numaflow-models/src/models/get_container_req.rs b/serving/numaflow-models/src/models/get_container_req.rs index 5b2695f98f..2d97d73f1b 100644 --- a/serving/numaflow-models/src/models/get_container_req.rs +++ b/serving/numaflow-models/src/models/get_container_req.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GetContainerReq { #[serde(rename = "env")] @@ -28,7 +25,14 @@ pub struct GetContainerReq { } impl GetContainerReq { - pub fn new(env: Vec, image: String, image_pull_policy: String, isb_svc_type: String, resources: k8s_openapi::api::core::v1::ResourceRequirements, volume_mounts: Vec) -> GetContainerReq { + pub fn new( + env: Vec, + image: String, + image_pull_policy: String, + isb_svc_type: String, + resources: k8s_openapi::api::core::v1::ResourceRequirements, + volume_mounts: Vec, + ) -> GetContainerReq { GetContainerReq { env, image, 
@@ -39,5 +43,3 @@ impl GetContainerReq { } } } - - diff --git a/serving/numaflow-models/src/models/get_daemon_deployment_req.rs b/serving/numaflow-models/src/models/get_daemon_deployment_req.rs index 4f8a7cc3b9..2c20b6eba2 100644 --- a/serving/numaflow-models/src/models/get_daemon_deployment_req.rs +++ b/serving/numaflow-models/src/models/get_daemon_deployment_req.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GetDaemonDeploymentReq { #[serde(rename = "DefaultResources")] @@ -26,7 +23,13 @@ pub struct GetDaemonDeploymentReq { } impl GetDaemonDeploymentReq { - pub fn new(default_resources: k8s_openapi::api::core::v1::ResourceRequirements, env: Vec, isb_svc_type: String, image: String, pull_policy: String) -> GetDaemonDeploymentReq { + pub fn new( + default_resources: k8s_openapi::api::core::v1::ResourceRequirements, + env: Vec, + isb_svc_type: String, + image: String, + pull_policy: String, + ) -> GetDaemonDeploymentReq { GetDaemonDeploymentReq { default_resources, env, @@ -36,5 +39,3 @@ impl GetDaemonDeploymentReq { } } } - - diff --git a/serving/numaflow-models/src/models/get_jet_stream_service_spec_req.rs b/serving/numaflow-models/src/models/get_jet_stream_service_spec_req.rs index 9c7891afd4..e6168e7d3e 100644 --- a/serving/numaflow-models/src/models/get_jet_stream_service_spec_req.rs +++ b/serving/numaflow-models/src/models/get_jet_stream_service_spec_req.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GetJetStreamServiceSpecReq { 
#[serde(rename = "ClientPort")] @@ -26,7 +23,13 @@ pub struct GetJetStreamServiceSpecReq { } impl GetJetStreamServiceSpecReq { - pub fn new(client_port: i32, cluster_port: i32, labels: ::std::collections::HashMap, metrics_port: i32, monitor_port: i32) -> GetJetStreamServiceSpecReq { + pub fn new( + client_port: i32, + cluster_port: i32, + labels: ::std::collections::HashMap, + metrics_port: i32, + monitor_port: i32, + ) -> GetJetStreamServiceSpecReq { GetJetStreamServiceSpecReq { client_port, cluster_port, @@ -36,5 +39,3 @@ impl GetJetStreamServiceSpecReq { } } } - - diff --git a/serving/numaflow-models/src/models/get_jet_stream_stateful_set_spec_req.rs b/serving/numaflow-models/src/models/get_jet_stream_stateful_set_spec_req.rs index 894ae495fd..ffa11c5bbe 100644 --- a/serving/numaflow-models/src/models/get_jet_stream_stateful_set_spec_req.rs +++ b/serving/numaflow-models/src/models/get_jet_stream_stateful_set_spec_req.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GetJetStreamStatefulSetSpecReq { #[serde(rename = "ClientPort")] @@ -46,7 +43,23 @@ pub struct GetJetStreamStatefulSetSpecReq { } impl GetJetStreamStatefulSetSpecReq { - pub fn new(client_port: i32, cluster_port: i32, config_map_name: String, config_reloader_image: String, default_resources: k8s_openapi::api::core::v1::ResourceRequirements, labels: ::std::collections::HashMap, metrics_exporter_image: String, metrics_port: i32, monitor_port: i32, nats_image: String, pvc_name_if_needed: String, server_auth_secret_name: String, server_encryption_secret_name: String, service_name: String, start_command: String) -> GetJetStreamStatefulSetSpecReq { + pub fn new( + client_port: i32, + cluster_port: i32, + config_map_name: String, + 
config_reloader_image: String, + default_resources: k8s_openapi::api::core::v1::ResourceRequirements, + labels: ::std::collections::HashMap, + metrics_exporter_image: String, + metrics_port: i32, + monitor_port: i32, + nats_image: String, + pvc_name_if_needed: String, + server_auth_secret_name: String, + server_encryption_secret_name: String, + service_name: String, + start_command: String, + ) -> GetJetStreamStatefulSetSpecReq { GetJetStreamStatefulSetSpecReq { client_port, cluster_port, @@ -66,5 +79,3 @@ impl GetJetStreamStatefulSetSpecReq { } } } - - diff --git a/serving/numaflow-models/src/models/get_redis_service_spec_req.rs b/serving/numaflow-models/src/models/get_redis_service_spec_req.rs index 651a67d08e..df584ea71a 100644 --- a/serving/numaflow-models/src/models/get_redis_service_spec_req.rs +++ b/serving/numaflow-models/src/models/get_redis_service_spec_req.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GetRedisServiceSpecReq { #[serde(rename = "Labels")] @@ -22,7 +19,11 @@ pub struct GetRedisServiceSpecReq { } impl GetRedisServiceSpecReq { - pub fn new(labels: ::std::collections::HashMap, redis_container_port: i32, sentinel_container_port: i32) -> GetRedisServiceSpecReq { + pub fn new( + labels: ::std::collections::HashMap, + redis_container_port: i32, + sentinel_container_port: i32, + ) -> GetRedisServiceSpecReq { GetRedisServiceSpecReq { labels, redis_container_port, @@ -30,5 +31,3 @@ impl GetRedisServiceSpecReq { } } } - - diff --git a/serving/numaflow-models/src/models/get_redis_stateful_set_spec_req.rs b/serving/numaflow-models/src/models/get_redis_stateful_set_spec_req.rs index 736ea82fa4..81cc11b7c9 100644 --- a/serving/numaflow-models/src/models/get_redis_stateful_set_spec_req.rs +++ 
b/serving/numaflow-models/src/models/get_redis_stateful_set_spec_req.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GetRedisStatefulSetSpecReq { #[serde(rename = "ConfConfigMapName")] @@ -48,7 +45,24 @@ pub struct GetRedisStatefulSetSpecReq { } impl GetRedisStatefulSetSpecReq { - pub fn new(conf_config_map_name: String, credential_secret_name: String, default_resources: k8s_openapi::api::core::v1::ResourceRequirements, health_config_map_name: String, init_container_image: String, labels: ::std::collections::HashMap, metrics_exporter_image: String, pvc_name_if_needed: String, redis_container_port: i32, redis_image: String, redis_metrics_container_port: i32, scripts_config_map_name: String, sentinel_container_port: i32, sentinel_image: String, service_name: String, tls_enabled: bool) -> GetRedisStatefulSetSpecReq { + pub fn new( + conf_config_map_name: String, + credential_secret_name: String, + default_resources: k8s_openapi::api::core::v1::ResourceRequirements, + health_config_map_name: String, + init_container_image: String, + labels: ::std::collections::HashMap, + metrics_exporter_image: String, + pvc_name_if_needed: String, + redis_container_port: i32, + redis_image: String, + redis_metrics_container_port: i32, + scripts_config_map_name: String, + sentinel_container_port: i32, + sentinel_image: String, + service_name: String, + tls_enabled: bool, + ) -> GetRedisStatefulSetSpecReq { GetRedisStatefulSetSpecReq { conf_config_map_name, credential_secret_name, @@ -69,5 +83,3 @@ impl GetRedisStatefulSetSpecReq { } } } - - diff --git a/serving/numaflow-models/src/models/get_side_input_deployment_req.rs b/serving/numaflow-models/src/models/get_side_input_deployment_req.rs index a2d389d107..f548abcf4d 100644 
--- a/serving/numaflow-models/src/models/get_side_input_deployment_req.rs +++ b/serving/numaflow-models/src/models/get_side_input_deployment_req.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GetSideInputDeploymentReq { #[serde(rename = "DefaultResources")] @@ -26,7 +23,13 @@ pub struct GetSideInputDeploymentReq { } impl GetSideInputDeploymentReq { - pub fn new(default_resources: k8s_openapi::api::core::v1::ResourceRequirements, env: Vec, isb_svc_type: String, image: String, pull_policy: String) -> GetSideInputDeploymentReq { + pub fn new( + default_resources: k8s_openapi::api::core::v1::ResourceRequirements, + env: Vec, + isb_svc_type: String, + image: String, + pull_policy: String, + ) -> GetSideInputDeploymentReq { GetSideInputDeploymentReq { default_resources, env, @@ -36,5 +39,3 @@ impl GetSideInputDeploymentReq { } } } - - diff --git a/serving/numaflow-models/src/models/get_vertex_pod_spec_req.rs b/serving/numaflow-models/src/models/get_vertex_pod_spec_req.rs index 26f6185385..bd976cf1f9 100644 --- a/serving/numaflow-models/src/models/get_vertex_pod_spec_req.rs +++ b/serving/numaflow-models/src/models/get_vertex_pod_spec_req.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GetVertexPodSpecReq { #[serde(rename = "DefaultResources")] @@ -32,7 +29,16 @@ pub struct GetVertexPodSpecReq { } impl GetVertexPodSpecReq { - pub fn new(default_resources: k8s_openapi::api::core::v1::ResourceRequirements, env: Vec, isb_svc_type: String, image: String, 
pipeline_spec: crate::models::PipelineSpec, pull_policy: String, serving_source_stream_name: String, side_inputs_store_name: String) -> GetVertexPodSpecReq { + pub fn new( + default_resources: k8s_openapi::api::core::v1::ResourceRequirements, + env: Vec, + isb_svc_type: String, + image: String, + pipeline_spec: crate::models::PipelineSpec, + pull_policy: String, + serving_source_stream_name: String, + side_inputs_store_name: String, + ) -> GetVertexPodSpecReq { GetVertexPodSpecReq { default_resources, env, @@ -45,5 +51,3 @@ impl GetVertexPodSpecReq { } } } - - diff --git a/serving/numaflow-models/src/models/group_by.rs b/serving/numaflow-models/src/models/group_by.rs index 605b3bc61a..915e8a5bca 100644 --- a/serving/numaflow-models/src/models/group_by.rs +++ b/serving/numaflow-models/src/models/group_by.rs @@ -4,14 +4,12 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ /// GroupBy : GroupBy indicates it is a reducer UDF - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GroupBy { #[serde(rename = "allowedLateness", skip_serializing_if = "Option::is_none")] @@ -35,5 +33,3 @@ impl GroupBy { } } } - - diff --git a/serving/numaflow-models/src/models/gssapi.rs b/serving/numaflow-models/src/models/gssapi.rs index c5fe33aad6..d040f83cb7 100644 --- a/serving/numaflow-models/src/models/gssapi.rs +++ b/serving/numaflow-models/src/models/gssapi.rs @@ -4,20 +4,21 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ /// Gssapi : GSSAPI represents a SASL GSSAPI config - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Gssapi { /// valid inputs - KRB5_USER_AUTH, KRB5_KEYTAB_AUTH Possible enum values: - 
`\"KRB5_KEYTAB_AUTH\"` represents the password method KRB5KeytabAuth = \"KRB5_KEYTAB_AUTH\" = 2 - `\"KRB5_USER_AUTH\"` represents the password method KRB5UserAuth = \"KRB5_USER_AUTH\" = 1 #[serde(rename = "authType")] pub auth_type: AuthType, - #[serde(rename = "kerberosConfigSecret", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "kerberosConfigSecret", + skip_serializing_if = "Option::is_none" + )] pub kerberos_config_secret: Option, #[serde(rename = "keytabSecret", skip_serializing_if = "Option::is_none")] pub keytab_secret: Option, @@ -33,7 +34,12 @@ pub struct Gssapi { impl Gssapi { /// GSSAPI represents a SASL GSSAPI config - pub fn new(auth_type: AuthType, realm: String, service_name: String, username_secret: k8s_openapi::api::core::v1::SecretKeySelector) -> Gssapi { + pub fn new( + auth_type: AuthType, + realm: String, + service_name: String, + username_secret: k8s_openapi::api::core::v1::SecretKeySelector, + ) -> Gssapi { Gssapi { auth_type, kerberos_config_secret: None, @@ -60,4 +66,3 @@ impl Default for AuthType { Self::KeytabAuth } } - diff --git a/serving/numaflow-models/src/models/http_source.rs b/serving/numaflow-models/src/models/http_source.rs index 8a5b7c7c97..e5178c86e3 100644 --- a/serving/numaflow-models/src/models/http_source.rs +++ b/serving/numaflow-models/src/models/http_source.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct HttpSource { #[serde(rename = "auth", skip_serializing_if = "Option::is_none")] @@ -28,5 +25,3 @@ impl HttpSource { } } } - - diff --git a/serving/numaflow-models/src/models/idle_source.rs b/serving/numaflow-models/src/models/idle_source.rs index 3d593c5375..cbe629ae9f 100644 --- a/serving/numaflow-models/src/models/idle_source.rs +++ 
b/serving/numaflow-models/src/models/idle_source.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IdleSource { #[serde(rename = "incrementBy", skip_serializing_if = "Option::is_none")] @@ -30,5 +27,3 @@ impl IdleSource { } } } - - diff --git a/serving/numaflow-models/src/models/inter_step_buffer_service.rs b/serving/numaflow-models/src/models/inter_step_buffer_service.rs index fc6154b756..8e6a9e20f4 100644 --- a/serving/numaflow-models/src/models/inter_step_buffer_service.rs +++ b/serving/numaflow-models/src/models/inter_step_buffer_service.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct InterStepBufferService { /// APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources @@ -38,5 +35,3 @@ impl InterStepBufferService { } } } - - diff --git a/serving/numaflow-models/src/models/inter_step_buffer_service_list.rs b/serving/numaflow-models/src/models/inter_step_buffer_service_list.rs index e933727ce5..4ef7a04c49 100644 --- a/serving/numaflow-models/src/models/inter_step_buffer_service_list.rs +++ b/serving/numaflow-models/src/models/inter_step_buffer_service_list.rs @@ -4,14 +4,12 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ /// InterStepBufferServiceList : InterStepBufferServiceList is the list of InterStepBufferService resources - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct InterStepBufferServiceList { /// APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources @@ -28,7 +26,10 @@ pub struct InterStepBufferServiceList { impl InterStepBufferServiceList { /// InterStepBufferServiceList is the list of InterStepBufferService resources - pub fn new(items: Vec, metadata: k8s_openapi::apimachinery::pkg::apis::meta::v1::ListMeta) -> InterStepBufferServiceList { + pub fn new( + items: Vec, + metadata: k8s_openapi::apimachinery::pkg::apis::meta::v1::ListMeta, + ) -> InterStepBufferServiceList { InterStepBufferServiceList { api_version: None, items, @@ -37,5 +38,3 @@ impl InterStepBufferServiceList { } } } - - diff --git a/serving/numaflow-models/src/models/inter_step_buffer_service_spec.rs b/serving/numaflow-models/src/models/inter_step_buffer_service_spec.rs index 79be40fa45..2de93da314 100644 --- a/serving/numaflow-models/src/models/inter_step_buffer_service_spec.rs +++ b/serving/numaflow-models/src/models/inter_step_buffer_service_spec.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct InterStepBufferServiceSpec { #[serde(rename = "jetstream", skip_serializing_if = "Option::is_none")] @@ -27,5 +24,3 @@ impl InterStepBufferServiceSpec { } } } - - diff --git a/serving/numaflow-models/src/models/inter_step_buffer_service_status.rs b/serving/numaflow-models/src/models/inter_step_buffer_service_status.rs index 6cf0a96745..43c7cd16cf 100644 --- a/serving/numaflow-models/src/models/inter_step_buffer_service_status.rs +++ b/serving/numaflow-models/src/models/inter_step_buffer_service_status.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * 
Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct InterStepBufferServiceStatus { /// Conditions are the latest available observations of a resource's current state. @@ -40,5 +37,3 @@ impl InterStepBufferServiceStatus { } } } - - diff --git a/serving/numaflow-models/src/models/jet_stream_buffer_service.rs b/serving/numaflow-models/src/models/jet_stream_buffer_service.rs index 9441f6d156..de5392a051 100644 --- a/serving/numaflow-models/src/models/jet_stream_buffer_service.rs +++ b/serving/numaflow-models/src/models/jet_stream_buffer_service.rs @@ -4,19 +4,19 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct JetStreamBufferService { #[serde(rename = "affinity", skip_serializing_if = "Option::is_none")] pub affinity: Option, /// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. - #[serde(rename = "automountServiceAccountToken", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "automountServiceAccountToken", + skip_serializing_if = "Option::is_none" + )] pub automount_service_account_token: Option, /// Optional configuration for the streams, consumers and buckets to be created in this JetStream service, if specified, it will be merged with the default configuration in numaflow-controller-config. It accepts a YAML format configuration, it may include 4 sections, \"stream\", \"consumer\", \"otBucket\" and \"procBucket\". Available fields under \"stream\" include \"retention\" (e.g. interest, limits, workerQueue), \"maxMsgs\", \"maxAge\" (e.g. 72h), \"replicas\" (1, 3, 5), \"duplicates\" (e.g. 5m). Available fields under \"consumer\" include \"ackWait\" (e.g. 
60s) Available fields under \"otBucket\" include \"maxValueSize\", \"history\", \"ttl\" (e.g. 72h), \"maxBytes\", \"replicas\" (1, 3, 5). Available fields under \"procBucket\" include \"maxValueSize\", \"history\", \"ttl\" (e.g. 72h), \"maxBytes\", \"replicas\" (1, 3, 5). #[serde(rename = "bufferConfig", skip_serializing_if = "Option::is_none")] @@ -36,7 +36,10 @@ pub struct JetStreamBufferService { pub image_pull_secrets: Option>, #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] pub metadata: Option>, - #[serde(rename = "metricsContainerTemplate", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "metricsContainerTemplate", + skip_serializing_if = "Option::is_none" + )] pub metrics_container_template: Option>, /// NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ #[serde(rename = "nodeSelector", skip_serializing_if = "Option::is_none")] @@ -49,7 +52,10 @@ pub struct JetStreamBufferService { /// If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. 
More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ #[serde(rename = "priorityClassName", skip_serializing_if = "Option::is_none")] pub priority_class_name: Option, - #[serde(rename = "reloaderContainerTemplate", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "reloaderContainerTemplate", + skip_serializing_if = "Option::is_none" + )] pub reloader_container_template: Option>, /// JetStream StatefulSet size #[serde(rename = "replicas", skip_serializing_if = "Option::is_none")] @@ -109,5 +115,3 @@ impl JetStreamBufferService { } } } - - diff --git a/serving/numaflow-models/src/models/jet_stream_config.rs b/serving/numaflow-models/src/models/jet_stream_config.rs index bc4c648c1a..d62ceab186 100644 --- a/serving/numaflow-models/src/models/jet_stream_config.rs +++ b/serving/numaflow-models/src/models/jet_stream_config.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct JetStreamConfig { #[serde(rename = "auth", skip_serializing_if = "Option::is_none")] @@ -35,5 +32,3 @@ impl JetStreamConfig { } } } - - diff --git a/serving/numaflow-models/src/models/jet_stream_source.rs b/serving/numaflow-models/src/models/jet_stream_source.rs index 6da52b583c..c1040259e3 100644 --- a/serving/numaflow-models/src/models/jet_stream_source.rs +++ b/serving/numaflow-models/src/models/jet_stream_source.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct JetStreamSource { #[serde(rename = "auth", skip_serializing_if = 
"Option::is_none")] @@ -35,5 +32,3 @@ impl JetStreamSource { } } } - - diff --git a/serving/numaflow-models/src/models/job_template.rs b/serving/numaflow-models/src/models/job_template.rs index dcae7d906a..9ffe8e162e 100644 --- a/serving/numaflow-models/src/models/job_template.rs +++ b/serving/numaflow-models/src/models/job_template.rs @@ -4,19 +4,19 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct JobTemplate { #[serde(rename = "affinity", skip_serializing_if = "Option::is_none")] pub affinity: Option, /// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. - #[serde(rename = "automountServiceAccountToken", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "automountServiceAccountToken", + skip_serializing_if = "Option::is_none" + )] pub automount_service_account_token: Option, /// Specifies the number of retries before marking this job failed. More info: https://kubernetes.io/docs/concepts/workloads/controllers/job/#pod-backoff-failure-policy Numaflow defaults to 20 #[serde(rename = "backoffLimit", skip_serializing_if = "Option::is_none")] @@ -54,7 +54,10 @@ pub struct JobTemplate { #[serde(rename = "tolerations", skip_serializing_if = "Option::is_none")] pub tolerations: Option>, /// ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. 
Numaflow defaults to 30 - #[serde(rename = "ttlSecondsAfterFinished", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "ttlSecondsAfterFinished", + skip_serializing_if = "Option::is_none" + )] pub ttl_seconds_after_finished: Option, } @@ -80,5 +83,3 @@ impl JobTemplate { } } } - - diff --git a/serving/numaflow-models/src/models/kafka_sink.rs b/serving/numaflow-models/src/models/kafka_sink.rs index 3b2c730fa5..99dabd7a4e 100644 --- a/serving/numaflow-models/src/models/kafka_sink.rs +++ b/serving/numaflow-models/src/models/kafka_sink.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct KafkaSink { #[serde(rename = "brokers", skip_serializing_if = "Option::is_none")] @@ -36,5 +33,3 @@ impl KafkaSink { } } } - - diff --git a/serving/numaflow-models/src/models/kafka_source.rs b/serving/numaflow-models/src/models/kafka_source.rs index 078eeee69a..b0771d8e33 100644 --- a/serving/numaflow-models/src/models/kafka_source.rs +++ b/serving/numaflow-models/src/models/kafka_source.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct KafkaSource { #[serde(rename = "brokers", skip_serializing_if = "Option::is_none")] @@ -39,5 +36,3 @@ impl KafkaSource { } } } - - diff --git a/serving/numaflow-models/src/models/lifecycle.rs b/serving/numaflow-models/src/models/lifecycle.rs index 3c1a478171..75065cf816 100644 --- a/serving/numaflow-models/src/models/lifecycle.rs +++ b/serving/numaflow-models/src/models/lifecycle.rs @@ -4,23 +4,26 @@ * No description provided 
(generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Lifecycle { /// DeleteGracePeriodSeconds used to delete pipeline gracefully - #[serde(rename = "deleteGracePeriodSeconds", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "deleteGracePeriodSeconds", + skip_serializing_if = "Option::is_none" + )] pub delete_grace_period_seconds: Option, /// DesiredPhase used to bring the pipeline from current phase to desired phase #[serde(rename = "desiredPhase", skip_serializing_if = "Option::is_none")] pub desired_phase: Option, /// PauseGracePeriodSeconds used to pause pipeline gracefully - #[serde(rename = "pauseGracePeriodSeconds", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "pauseGracePeriodSeconds", + skip_serializing_if = "Option::is_none" + )] pub pause_grace_period_seconds: Option, } @@ -33,5 +36,3 @@ impl Lifecycle { } } } - - diff --git a/serving/numaflow-models/src/models/log.rs b/serving/numaflow-models/src/models/log.rs index 141bb39e94..c1452a70ff 100644 --- a/serving/numaflow-models/src/models/log.rs +++ b/serving/numaflow-models/src/models/log.rs @@ -4,22 +4,15 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct Log { -} +pub struct Log {} impl Log { pub fn new() -> Log { - Log { - } + Log {} } } - - diff --git a/serving/numaflow-models/src/models/metadata.rs b/serving/numaflow-models/src/models/metadata.rs index 13c95358f6..56c214a01e 100644 --- a/serving/numaflow-models/src/models/metadata.rs +++ b/serving/numaflow-models/src/models/metadata.rs @@ -4,13 +4,10 @@ * No description 
provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Metadata { #[serde(rename = "annotations", skip_serializing_if = "Option::is_none")] @@ -27,5 +24,3 @@ impl Metadata { } } } - - diff --git a/serving/numaflow-models/src/models/native_redis.rs b/serving/numaflow-models/src/models/native_redis.rs index c538f13b2a..a22fb37436 100644 --- a/serving/numaflow-models/src/models/native_redis.rs +++ b/serving/numaflow-models/src/models/native_redis.rs @@ -4,19 +4,19 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct NativeRedis { #[serde(rename = "affinity", skip_serializing_if = "Option::is_none")] pub affinity: Option, /// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. - #[serde(rename = "automountServiceAccountToken", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "automountServiceAccountToken", + skip_serializing_if = "Option::is_none" + )] pub automount_service_account_token: Option, #[serde(rename = "dnsConfig", skip_serializing_if = "Option::is_none")] pub dns_config: Option, @@ -26,11 +26,17 @@ pub struct NativeRedis { /// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. 
More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod #[serde(rename = "imagePullSecrets", skip_serializing_if = "Option::is_none")] pub image_pull_secrets: Option>, - #[serde(rename = "initContainerTemplate", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "initContainerTemplate", + skip_serializing_if = "Option::is_none" + )] pub init_container_template: Option>, #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] pub metadata: Option>, - #[serde(rename = "metricsContainerTemplate", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "metricsContainerTemplate", + skip_serializing_if = "Option::is_none" + )] pub metrics_container_template: Option>, /// NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ #[serde(rename = "nodeSelector", skip_serializing_if = "Option::is_none")] @@ -43,7 +49,10 @@ pub struct NativeRedis { /// If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. 
More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ #[serde(rename = "priorityClassName", skip_serializing_if = "Option::is_none")] pub priority_class_name: Option, - #[serde(rename = "redisContainerTemplate", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "redisContainerTemplate", + skip_serializing_if = "Option::is_none" + )] pub redis_container_template: Option>, /// Redis StatefulSet size #[serde(rename = "replicas", skip_serializing_if = "Option::is_none")] @@ -53,7 +62,10 @@ pub struct NativeRedis { pub runtime_class_name: Option, #[serde(rename = "securityContext", skip_serializing_if = "Option::is_none")] pub security_context: Option, - #[serde(rename = "sentinelContainerTemplate", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "sentinelContainerTemplate", + skip_serializing_if = "Option::is_none" + )] pub sentinel_container_template: Option>, /// ServiceAccountName applied to the pod #[serde(rename = "serviceAccountName", skip_serializing_if = "Option::is_none")] @@ -95,5 +107,3 @@ impl NativeRedis { } } } - - diff --git a/serving/numaflow-models/src/models/nats_auth.rs b/serving/numaflow-models/src/models/nats_auth.rs index 8c5c977109..7b085650f9 100644 --- a/serving/numaflow-models/src/models/nats_auth.rs +++ b/serving/numaflow-models/src/models/nats_auth.rs @@ -4,14 +4,12 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ /// NatsAuth : NatsAuth defines how to authenticate the nats access - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct NatsAuth { #[serde(rename = "basic", skip_serializing_if = "Option::is_none")] @@ -32,5 +30,3 @@ impl NatsAuth { } } } - - diff --git a/serving/numaflow-models/src/models/nats_source.rs b/serving/numaflow-models/src/models/nats_source.rs index d60f175c23..666fcbb884 
100644 --- a/serving/numaflow-models/src/models/nats_source.rs +++ b/serving/numaflow-models/src/models/nats_source.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct NatsSource { #[serde(rename = "auth", skip_serializing_if = "Option::is_none")] @@ -39,5 +36,3 @@ impl NatsSource { } } } - - diff --git a/serving/numaflow-models/src/models/no_store.rs b/serving/numaflow-models/src/models/no_store.rs index 187e93d96b..f91247af2f 100644 --- a/serving/numaflow-models/src/models/no_store.rs +++ b/serving/numaflow-models/src/models/no_store.rs @@ -4,24 +4,18 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ /// NoStore : NoStore means there will be no persistence storage and there will be data loss during pod restarts. Use this option only if you do not care about correctness (e.g., approx statistics pipeline like sampling rate, etc.). - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct NoStore { -} +pub struct NoStore {} impl NoStore { /// NoStore means there will be no persistence storage and there will be data loss during pod restarts. Use this option only if you do not care about correctness (e.g., approx statistics pipeline like sampling rate, etc.). 
pub fn new() -> NoStore { - NoStore { - } + NoStore {} } } - - diff --git a/serving/numaflow-models/src/models/pbq_storage.rs b/serving/numaflow-models/src/models/pbq_storage.rs index ea9cb9514e..c01ffe9eba 100644 --- a/serving/numaflow-models/src/models/pbq_storage.rs +++ b/serving/numaflow-models/src/models/pbq_storage.rs @@ -4,21 +4,22 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ /// PbqStorage : PBQStorage defines the persistence configuration for a vertex. - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PbqStorage { #[serde(rename = "emptyDir", skip_serializing_if = "Option::is_none")] pub empty_dir: Option, #[serde(rename = "no_store", skip_serializing_if = "Option::is_none")] pub no_store: Option>, - #[serde(rename = "persistentVolumeClaim", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "persistentVolumeClaim", + skip_serializing_if = "Option::is_none" + )] pub persistent_volume_claim: Option>, } @@ -32,5 +33,3 @@ impl PbqStorage { } } } - - diff --git a/serving/numaflow-models/src/models/persistence_strategy.rs b/serving/numaflow-models/src/models/persistence_strategy.rs index 49351a146d..3541e83b91 100644 --- a/serving/numaflow-models/src/models/persistence_strategy.rs +++ b/serving/numaflow-models/src/models/persistence_strategy.rs @@ -4,14 +4,12 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ /// PersistenceStrategy : PersistenceStrategy defines the strategy of persistence - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PersistenceStrategy { /// Available access modes such as ReadWriteOnce, ReadWriteMany 
https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes @@ -34,5 +32,3 @@ impl PersistenceStrategy { } } } - - diff --git a/serving/numaflow-models/src/models/pipeline.rs b/serving/numaflow-models/src/models/pipeline.rs index e6d7054b36..52db105f24 100644 --- a/serving/numaflow-models/src/models/pipeline.rs +++ b/serving/numaflow-models/src/models/pipeline.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Pipeline { /// APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources @@ -38,5 +35,3 @@ impl Pipeline { } } } - - diff --git a/serving/numaflow-models/src/models/pipeline_limits.rs b/serving/numaflow-models/src/models/pipeline_limits.rs index 3e3a2e58f1..c4a158e6ae 100644 --- a/serving/numaflow-models/src/models/pipeline_limits.rs +++ b/serving/numaflow-models/src/models/pipeline_limits.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PipelineLimits { /// BufferMaxLength is used to define the max length of a buffer. Only applies to UDF and Source vertices as only they do buffer write. It can be overridden by the settings in vertex limits. 
@@ -36,5 +33,3 @@ impl PipelineLimits { } } } - - diff --git a/serving/numaflow-models/src/models/pipeline_list.rs b/serving/numaflow-models/src/models/pipeline_list.rs index a159018e5e..7fc027c1a7 100644 --- a/serving/numaflow-models/src/models/pipeline_list.rs +++ b/serving/numaflow-models/src/models/pipeline_list.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PipelineList { /// APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources @@ -35,5 +32,3 @@ impl PipelineList { } } } - - diff --git a/serving/numaflow-models/src/models/pipeline_spec.rs b/serving/numaflow-models/src/models/pipeline_spec.rs index 66c29acf08..c33cdb1f80 100644 --- a/serving/numaflow-models/src/models/pipeline_spec.rs +++ b/serving/numaflow-models/src/models/pipeline_spec.rs @@ -4,19 +4,19 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PipelineSpec { /// Edges define the relationships between vertices #[serde(rename = "edges", skip_serializing_if = "Option::is_none")] pub edges: Option>, - #[serde(rename = "interStepBufferServiceName", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "interStepBufferServiceName", + skip_serializing_if = "Option::is_none" + )] pub inter_step_buffer_service_name: Option, #[serde(rename = "lifecycle", 
skip_serializing_if = "Option::is_none")] pub lifecycle: Option>, @@ -47,5 +47,3 @@ impl PipelineSpec { } } } - - diff --git a/serving/numaflow-models/src/models/pipeline_status.rs b/serving/numaflow-models/src/models/pipeline_status.rs index 64e8934fab..8756844a7c 100644 --- a/serving/numaflow-models/src/models/pipeline_status.rs +++ b/serving/numaflow-models/src/models/pipeline_status.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PipelineStatus { /// Conditions are the latest available observations of a resource's current state. @@ -55,5 +52,3 @@ impl PipelineStatus { } } } - - diff --git a/serving/numaflow-models/src/models/redis_buffer_service.rs b/serving/numaflow-models/src/models/redis_buffer_service.rs index 10994749ff..5dc527b391 100644 --- a/serving/numaflow-models/src/models/redis_buffer_service.rs +++ b/serving/numaflow-models/src/models/redis_buffer_service.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RedisBufferService { #[serde(rename = "external", skip_serializing_if = "Option::is_none")] @@ -27,5 +24,3 @@ impl RedisBufferService { } } } - - diff --git a/serving/numaflow-models/src/models/redis_config.rs b/serving/numaflow-models/src/models/redis_config.rs index 9c6c5180e6..cf563ab9ec 100644 --- a/serving/numaflow-models/src/models/redis_config.rs +++ b/serving/numaflow-models/src/models/redis_config.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator 
https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RedisConfig { /// Only required when Sentinel is used @@ -43,5 +40,3 @@ impl RedisConfig { } } } - - diff --git a/serving/numaflow-models/src/models/redis_settings.rs b/serving/numaflow-models/src/models/redis_settings.rs index 3b8e813a28..f5b4b4c2b7 100644 --- a/serving/numaflow-models/src/models/redis_settings.rs +++ b/serving/numaflow-models/src/models/redis_settings.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RedisSettings { /// Special settings for Redis master node, will override the global settings from controller config @@ -37,5 +34,3 @@ impl RedisSettings { } } } - - diff --git a/serving/numaflow-models/src/models/sasl.rs b/serving/numaflow-models/src/models/sasl.rs index 163eb8a1b0..2f56e8cce0 100644 --- a/serving/numaflow-models/src/models/sasl.rs +++ b/serving/numaflow-models/src/models/sasl.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Sasl { #[serde(rename = "gssapi", skip_serializing_if = "Option::is_none")] @@ -37,5 +34,3 @@ impl Sasl { } } } - - diff --git a/serving/numaflow-models/src/models/sasl_plain.rs b/serving/numaflow-models/src/models/sasl_plain.rs index 085581f975..7719720dd3 100644 --- a/serving/numaflow-models/src/models/sasl_plain.rs +++ 
b/serving/numaflow-models/src/models/sasl_plain.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SaslPlain { #[serde(rename = "handshake")] @@ -22,7 +19,10 @@ pub struct SaslPlain { } impl SaslPlain { - pub fn new(handshake: bool, user_secret: k8s_openapi::api::core::v1::SecretKeySelector) -> SaslPlain { + pub fn new( + handshake: bool, + user_secret: k8s_openapi::api::core::v1::SecretKeySelector, + ) -> SaslPlain { SaslPlain { handshake, password_secret: None, @@ -30,5 +30,3 @@ impl SaslPlain { } } } - - diff --git a/serving/numaflow-models/src/models/scale.rs b/serving/numaflow-models/src/models/scale.rs index 7510676ce1..03673cca73 100644 --- a/serving/numaflow-models/src/models/scale.rs +++ b/serving/numaflow-models/src/models/scale.rs @@ -4,14 +4,12 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ /// Scale : Scale defines the parameters for autoscaling. - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Scale { /// Deprecated: Use scaleUpCooldownSeconds and scaleDownCooldownSeconds instead. Cooldown seconds after a scaling operation before another one. @@ -33,19 +31,34 @@ pub struct Scale { #[serde(rename = "replicasPerScale", skip_serializing_if = "Option::is_none")] pub replicas_per_scale: Option, /// ScaleDownCooldownSeconds defines the cooldown seconds after a scaling operation, before a follow-up scaling down. It defaults to the CooldownSeconds if not set. 
- #[serde(rename = "scaleDownCooldownSeconds", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "scaleDownCooldownSeconds", + skip_serializing_if = "Option::is_none" + )] pub scale_down_cooldown_seconds: Option, /// ScaleUpCooldownSeconds defines the cooldown seconds after a scaling operation, before a follow-up scaling up. It defaults to the CooldownSeconds if not set. - #[serde(rename = "scaleUpCooldownSeconds", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "scaleUpCooldownSeconds", + skip_serializing_if = "Option::is_none" + )] pub scale_up_cooldown_seconds: Option, /// TargetBufferAvailability is used to define the target percentage of the buffer availability. A valid and meaningful value should be less than the BufferUsageLimit defined in the Edge spec (or Pipeline spec), for example, 50. It only applies to UDF and Sink vertices because only they have buffers to read. - #[serde(rename = "targetBufferAvailability", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "targetBufferAvailability", + skip_serializing_if = "Option::is_none" + )] pub target_buffer_availability: Option, /// TargetProcessingSeconds is used to tune the aggressiveness of autoscaling for source vertices, it measures how fast you want the vertex to process all the pending messages. Typically increasing the value, which leads to lower processing rate, thus less replicas. It's only effective for source vertices. - #[serde(rename = "targetProcessingSeconds", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "targetProcessingSeconds", + skip_serializing_if = "Option::is_none" + )] pub target_processing_seconds: Option, /// After scaling down the source vertex to 0, sleep how many seconds before scaling the source vertex back up to peek. 
- #[serde(rename = "zeroReplicaSleepSeconds", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "zeroReplicaSleepSeconds", + skip_serializing_if = "Option::is_none" + )] pub zero_replica_sleep_seconds: Option, } @@ -67,5 +80,3 @@ impl Scale { } } } - - diff --git a/serving/numaflow-models/src/models/serving_source.rs b/serving/numaflow-models/src/models/serving_source.rs index e1e6c444cf..25703a7031 100644 --- a/serving/numaflow-models/src/models/serving_source.rs +++ b/serving/numaflow-models/src/models/serving_source.rs @@ -4,14 +4,12 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ /// ServingSource : ServingSource is the HTTP endpoint for Numaflow. - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ServingSource { #[serde(rename = "auth", skip_serializing_if = "Option::is_none")] @@ -37,5 +35,3 @@ impl ServingSource { } } } - - diff --git a/serving/numaflow-models/src/models/serving_store.rs b/serving/numaflow-models/src/models/serving_store.rs index 44998b5e73..7a3086aea9 100644 --- a/serving/numaflow-models/src/models/serving_store.rs +++ b/serving/numaflow-models/src/models/serving_store.rs @@ -4,14 +4,12 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ /// ServingStore : ServingStore to track and store data and metadata for tracking and serving. - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ServingStore { #[serde(rename = "ttl", skip_serializing_if = "Option::is_none")] @@ -24,11 +22,6 @@ pub struct ServingStore { impl ServingStore { /// ServingStore to track and store data and metadata for tracking and serving. 
pub fn new(url: String) -> ServingStore { - ServingStore { - ttl: None, - url, - } + ServingStore { ttl: None, url } } } - - diff --git a/serving/numaflow-models/src/models/session_window.rs b/serving/numaflow-models/src/models/session_window.rs index 67f1f440bc..551f164cde 100644 --- a/serving/numaflow-models/src/models/session_window.rs +++ b/serving/numaflow-models/src/models/session_window.rs @@ -4,14 +4,12 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ /// SessionWindow : SessionWindow describes a session window - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SessionWindow { #[serde(rename = "timeout", skip_serializing_if = "Option::is_none")] @@ -21,10 +19,6 @@ pub struct SessionWindow { impl SessionWindow { /// SessionWindow describes a session window pub fn new() -> SessionWindow { - SessionWindow { - timeout: None, - } + SessionWindow { timeout: None } } } - - diff --git a/serving/numaflow-models/src/models/side_input.rs b/serving/numaflow-models/src/models/side_input.rs index 9cb2040749..275af684af 100644 --- a/serving/numaflow-models/src/models/side_input.rs +++ b/serving/numaflow-models/src/models/side_input.rs @@ -4,14 +4,12 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ /// SideInput : SideInput defines information of a Side Input - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SideInput { #[serde(rename = "container")] @@ -26,7 +24,11 @@ pub struct SideInput { impl SideInput { /// SideInput defines information of a Side Input - pub fn new(container: crate::models::Container, name: String, trigger: crate::models::SideInputTrigger) -> SideInput { + pub fn new( + 
container: crate::models::Container, + name: String, + trigger: crate::models::SideInputTrigger, + ) -> SideInput { SideInput { container: Box::new(container), name, @@ -35,5 +37,3 @@ impl SideInput { } } } - - diff --git a/serving/numaflow-models/src/models/side_input_trigger.rs b/serving/numaflow-models/src/models/side_input_trigger.rs index f92cff7cd9..497f5461b3 100644 --- a/serving/numaflow-models/src/models/side_input_trigger.rs +++ b/serving/numaflow-models/src/models/side_input_trigger.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SideInputTrigger { /// The schedule to trigger the retrievement of the side input data. It supports cron format, for example, \"0 30 * * * *\". Or interval based format, such as \"@hourly\", \"@every 1h30m\", etc. @@ -28,5 +25,3 @@ impl SideInputTrigger { } } } - - diff --git a/serving/numaflow-models/src/models/side_inputs_manager_template.rs b/serving/numaflow-models/src/models/side_inputs_manager_template.rs index d9a8e6a4fc..82f7afe5d1 100644 --- a/serving/numaflow-models/src/models/side_inputs_manager_template.rs +++ b/serving/numaflow-models/src/models/side_inputs_manager_template.rs @@ -4,19 +4,19 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SideInputsManagerTemplate { #[serde(rename = "affinity", skip_serializing_if = "Option::is_none")] pub affinity: Option, /// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. 
- #[serde(rename = "automountServiceAccountToken", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "automountServiceAccountToken", + skip_serializing_if = "Option::is_none" + )] pub automount_service_account_token: Option, #[serde(rename = "containerTemplate", skip_serializing_if = "Option::is_none")] pub container_template: Option>, @@ -28,7 +28,10 @@ pub struct SideInputsManagerTemplate { /// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod #[serde(rename = "imagePullSecrets", skip_serializing_if = "Option::is_none")] pub image_pull_secrets: Option>, - #[serde(rename = "initContainerTemplate", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "initContainerTemplate", + skip_serializing_if = "Option::is_none" + )] pub init_container_template: Option>, #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] pub metadata: Option>, @@ -75,5 +78,3 @@ impl SideInputsManagerTemplate { } } } - - diff --git a/serving/numaflow-models/src/models/sink.rs b/serving/numaflow-models/src/models/sink.rs index ebed76c690..6cd57bb2ec 100644 --- a/serving/numaflow-models/src/models/sink.rs +++ b/serving/numaflow-models/src/models/sink.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Sink { #[serde(rename = "blackhole", skip_serializing_if = "Option::is_none")] @@ -36,5 +33,3 @@ impl Sink { } } } - - diff --git 
a/serving/numaflow-models/src/models/sliding_window.rs b/serving/numaflow-models/src/models/sliding_window.rs index 10530f04b3..4d2f9a06c7 100644 --- a/serving/numaflow-models/src/models/sliding_window.rs +++ b/serving/numaflow-models/src/models/sliding_window.rs @@ -4,14 +4,12 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ /// SlidingWindow : SlidingWindow describes a sliding window - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SlidingWindow { #[serde(rename = "length", skip_serializing_if = "Option::is_none")] @@ -33,5 +31,3 @@ impl SlidingWindow { } } } - - diff --git a/serving/numaflow-models/src/models/source.rs b/serving/numaflow-models/src/models/source.rs index fadfe1a402..b7331b4942 100644 --- a/serving/numaflow-models/src/models/source.rs +++ b/serving/numaflow-models/src/models/source.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Source { #[serde(rename = "generator", skip_serializing_if = "Option::is_none")] @@ -45,5 +42,3 @@ impl Source { } } } - - diff --git a/serving/numaflow-models/src/models/status.rs b/serving/numaflow-models/src/models/status.rs index accf9e49d2..cbfa91bf3d 100644 --- a/serving/numaflow-models/src/models/status.rs +++ b/serving/numaflow-models/src/models/status.rs @@ -4,14 +4,12 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ /// Status : Status is a common structure which can be used for Status field. 
- - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Status { /// Conditions are the latest available observations of a resource's current state. @@ -22,10 +20,6 @@ pub struct Status { impl Status { /// Status is a common structure which can be used for Status field. pub fn new() -> Status { - Status { - conditions: None, - } + Status { conditions: None } } } - - diff --git a/serving/numaflow-models/src/models/tag_conditions.rs b/serving/numaflow-models/src/models/tag_conditions.rs index 42b6b94e0a..60d4b3ca58 100644 --- a/serving/numaflow-models/src/models/tag_conditions.rs +++ b/serving/numaflow-models/src/models/tag_conditions.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TagConditions { /// Operator specifies the type of operation that should be used for conditional forwarding value could be \"and\", \"or\", \"not\" @@ -29,5 +26,3 @@ impl TagConditions { } } } - - diff --git a/serving/numaflow-models/src/models/templates.rs b/serving/numaflow-models/src/models/templates.rs index 382019d5b5..98876b49a8 100644 --- a/serving/numaflow-models/src/models/templates.rs +++ b/serving/numaflow-models/src/models/templates.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Templates { #[serde(rename = "daemon", skip_serializing_if = "Option::is_none")] @@ -33,5 +30,3 @@ impl Templates { } } } - - diff --git a/serving/numaflow-models/src/models/tls.rs b/serving/numaflow-models/src/models/tls.rs index b140b68531..b0c896e8ea 100644 --- 
a/serving/numaflow-models/src/models/tls.rs +++ b/serving/numaflow-models/src/models/tls.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Tls { #[serde(rename = "caCertSecret", skip_serializing_if = "Option::is_none")] @@ -33,5 +30,3 @@ impl Tls { } } } - - diff --git a/serving/numaflow-models/src/models/transformer.rs b/serving/numaflow-models/src/models/transformer.rs index 5540a6b6f9..a17255f010 100644 --- a/serving/numaflow-models/src/models/transformer.rs +++ b/serving/numaflow-models/src/models/transformer.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Transformer { #[serde(rename = "args", skip_serializing_if = "Option::is_none")] @@ -30,5 +27,3 @@ impl Transformer { } } } - - diff --git a/serving/numaflow-models/src/models/ud_sink.rs b/serving/numaflow-models/src/models/ud_sink.rs index f39a053de5..f247400f86 100644 --- a/serving/numaflow-models/src/models/ud_sink.rs +++ b/serving/numaflow-models/src/models/ud_sink.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct UdSink { #[serde(rename = "container")] @@ -24,5 +21,3 @@ impl UdSink { } } } - - diff --git a/serving/numaflow-models/src/models/ud_source.rs b/serving/numaflow-models/src/models/ud_source.rs index 2242908feb..65986169a6 
100644 --- a/serving/numaflow-models/src/models/ud_source.rs +++ b/serving/numaflow-models/src/models/ud_source.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct UdSource { #[serde(rename = "container")] @@ -24,5 +21,3 @@ impl UdSource { } } } - - diff --git a/serving/numaflow-models/src/models/ud_transformer.rs b/serving/numaflow-models/src/models/ud_transformer.rs index d397305f2b..ff698fab80 100644 --- a/serving/numaflow-models/src/models/ud_transformer.rs +++ b/serving/numaflow-models/src/models/ud_transformer.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct UdTransformer { #[serde(rename = "builtin", skip_serializing_if = "Option::is_none")] @@ -27,5 +24,3 @@ impl UdTransformer { } } } - - diff --git a/serving/numaflow-models/src/models/udf.rs b/serving/numaflow-models/src/models/udf.rs index cfeae12666..cd1f6b520f 100644 --- a/serving/numaflow-models/src/models/udf.rs +++ b/serving/numaflow-models/src/models/udf.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Udf { #[serde(rename = "builtin", skip_serializing_if = "Option::is_none")] @@ -30,5 +27,3 @@ impl Udf { } } } - - diff --git a/serving/numaflow-models/src/models/vertex.rs b/serving/numaflow-models/src/models/vertex.rs index 
d23f3023c5..1f9cc0e617 100644 --- a/serving/numaflow-models/src/models/vertex.rs +++ b/serving/numaflow-models/src/models/vertex.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Vertex { /// APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources @@ -38,5 +35,3 @@ impl Vertex { } } } - - diff --git a/serving/numaflow-models/src/models/vertex_instance.rs b/serving/numaflow-models/src/models/vertex_instance.rs index 6c3298c345..e7ba664e60 100644 --- a/serving/numaflow-models/src/models/vertex_instance.rs +++ b/serving/numaflow-models/src/models/vertex_instance.rs @@ -4,14 +4,12 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ /// VertexInstance : VertexInstance is a wrapper of a vertex instance, which contains the vertex spec and the instance information such as hostname and replica index. 
- - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct VertexInstance { #[serde(rename = "hostname", skip_serializing_if = "Option::is_none")] @@ -32,5 +30,3 @@ impl VertexInstance { } } } - - diff --git a/serving/numaflow-models/src/models/vertex_limits.rs b/serving/numaflow-models/src/models/vertex_limits.rs index 713c0faa41..6f201e8de8 100644 --- a/serving/numaflow-models/src/models/vertex_limits.rs +++ b/serving/numaflow-models/src/models/vertex_limits.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct VertexLimits { /// BufferMaxLength is used to define the max length of a buffer. It overrides the settings from pipeline limits. @@ -36,5 +33,3 @@ impl VertexLimits { } } } - - diff --git a/serving/numaflow-models/src/models/vertex_list.rs b/serving/numaflow-models/src/models/vertex_list.rs index a350a3b983..68967d8aa5 100644 --- a/serving/numaflow-models/src/models/vertex_list.rs +++ b/serving/numaflow-models/src/models/vertex_list.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct VertexList { /// APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources @@ -35,5 +32,3 @@ impl VertexList { } } } - - diff --git a/serving/numaflow-models/src/models/vertex_spec.rs b/serving/numaflow-models/src/models/vertex_spec.rs index f96eb218ed..a3fafd4882 100644 --- a/serving/numaflow-models/src/models/vertex_spec.rs +++ b/serving/numaflow-models/src/models/vertex_spec.rs @@ -4,19 +4,19 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct VertexSpec { #[serde(rename = "affinity", skip_serializing_if = "Option::is_none")] pub affinity: Option, /// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. - #[serde(rename = "automountServiceAccountToken", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "automountServiceAccountToken", + skip_serializing_if = "Option::is_none" + )] pub automount_service_account_token: Option, #[serde(rename = "containerTemplate", skip_serializing_if = "Option::is_none")] pub container_template: Option>, @@ -30,12 +30,18 @@ pub struct VertexSpec { /// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. 
More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod #[serde(rename = "imagePullSecrets", skip_serializing_if = "Option::is_none")] pub image_pull_secrets: Option>, - #[serde(rename = "initContainerTemplate", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "initContainerTemplate", + skip_serializing_if = "Option::is_none" + )] pub init_container_template: Option>, /// List of customized init containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ #[serde(rename = "initContainers", skip_serializing_if = "Option::is_none")] pub init_containers: Option>, - #[serde(rename = "interStepBufferServiceName", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "interStepBufferServiceName", + skip_serializing_if = "Option::is_none" + )] pub inter_step_buffer_service_name: Option, #[serde(rename = "limits", skip_serializing_if = "Option::is_none")] pub limits: Option>, @@ -72,7 +78,10 @@ pub struct VertexSpec { /// Names of the side inputs used in this vertex. #[serde(rename = "sideInputs", skip_serializing_if = "Option::is_none")] pub side_inputs: Option>, - #[serde(rename = "sideInputsContainerTemplate", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "sideInputsContainerTemplate", + skip_serializing_if = "Option::is_none" + )] pub side_inputs_container_template: Option>, /// List of customized sidecar containers belonging to the pod. 
#[serde(rename = "sidecars", skip_serializing_if = "Option::is_none")] @@ -133,5 +142,3 @@ impl VertexSpec { } } } - - diff --git a/serving/numaflow-models/src/models/vertex_status.rs b/serving/numaflow-models/src/models/vertex_status.rs index 3e0377a795..6bcea1f106 100644 --- a/serving/numaflow-models/src/models/vertex_status.rs +++ b/serving/numaflow-models/src/models/vertex_status.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct VertexStatus { /// Conditions are the latest available observations of a resource's current state. @@ -46,5 +43,3 @@ impl VertexStatus { } } } - - diff --git a/serving/numaflow-models/src/models/vertex_template.rs b/serving/numaflow-models/src/models/vertex_template.rs index 6bd04e9cad..34aae02b0e 100644 --- a/serving/numaflow-models/src/models/vertex_template.rs +++ b/serving/numaflow-models/src/models/vertex_template.rs @@ -4,19 +4,19 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct VertexTemplate { #[serde(rename = "affinity", skip_serializing_if = "Option::is_none")] pub affinity: Option, /// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. 
- #[serde(rename = "automountServiceAccountToken", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "automountServiceAccountToken", + skip_serializing_if = "Option::is_none" + )] pub automount_service_account_token: Option, #[serde(rename = "containerTemplate", skip_serializing_if = "Option::is_none")] pub container_template: Option>, @@ -28,7 +28,10 @@ pub struct VertexTemplate { /// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod #[serde(rename = "imagePullSecrets", skip_serializing_if = "Option::is_none")] pub image_pull_secrets: Option>, - #[serde(rename = "initContainerTemplate", skip_serializing_if = "Option::is_none")] + #[serde( + rename = "initContainerTemplate", + skip_serializing_if = "Option::is_none" + )] pub init_container_template: Option>, #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] pub metadata: Option>, @@ -75,5 +78,3 @@ impl VertexTemplate { } } } - - diff --git a/serving/numaflow-models/src/models/watermark.rs b/serving/numaflow-models/src/models/watermark.rs index e32de79335..b54bf2a74b 100644 --- a/serving/numaflow-models/src/models/watermark.rs +++ b/serving/numaflow-models/src/models/watermark.rs @@ -4,13 +4,10 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ - - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Watermark { /// Disabled toggles the watermark propagation, defaults to false. 
@@ -31,5 +28,3 @@ impl Watermark { } } } - - diff --git a/serving/numaflow-models/src/models/window.rs b/serving/numaflow-models/src/models/window.rs index 3f72f7be34..28d4d42243 100644 --- a/serving/numaflow-models/src/models/window.rs +++ b/serving/numaflow-models/src/models/window.rs @@ -4,14 +4,12 @@ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: latest - * + * * Generated by: https://openapi-generator.tech */ /// Window : Window describes windowing strategy - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Window { #[serde(rename = "fixed", skip_serializing_if = "Option::is_none")] @@ -32,5 +30,3 @@ impl Window { } } } - - From 4747e564271786c67be5e922d954df924b391394 Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Tue, 6 Aug 2024 20:46:14 +0530 Subject: [PATCH 06/23] chore: publishing extension image (#1874) Signed-off-by: Yashash H L Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- .github/workflows/release.yml | 4 +- .gitignore | 4 - Dockerfile | 55 +- Makefile | 6 +- pkg/apis/numaflow/v1alpha1/serving_source.go | 2 +- pkg/apis/numaflow/v1alpha1/vertex_types.go | 7 +- .../numaflow/v1alpha1/vertex_types_test.go | 6 +- pkg/watermark/fetch/processor_manager_test.go | 2 +- serving/Cargo.lock | 3445 +++++++++++++++++ serving/Cargo.toml | 3 +- serving/Dockerfile | 54 +- serving/Makefile | 15 + serving/src/app/callback/store/redisstore.rs | 2 +- serving/src/main.rs | 2 +- test/fixtures/e2e_suite.go | 1 + 15 files changed, 3556 insertions(+), 52 deletions(-) create mode 100644 serving/Cargo.lock create mode 100644 serving/Makefile diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 82e27e4c53..e5b597014d 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -165,7 +165,7 @@ jobs: env: COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}} COSIGN_PASSWORD: 
${{secrets.COSIGN_PASSWORD}} - + - name: Release binaries uses: softprops/action-gh-release@v1 if: startsWith(github.ref, 'refs/tags/') @@ -179,4 +179,4 @@ jobs: /tmp/sbom.tar.gz /tmp/sbom.tar.gz.sig env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file diff --git a/.gitignore b/.gitignore index a4acf2f150..50d5b708c3 100644 --- a/.gitignore +++ b/.gitignore @@ -32,10 +32,6 @@ docs/APIs.html debug/ target/ -# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries -# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html -Cargo.lock - # These are backup files generated by rustfmt **/*.rs.bk diff --git a/Dockerfile b/Dockerfile index 57787db67d..4ea993836f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,27 +1,66 @@ -ARG BASE_IMAGE=scratch +ARG BASE_IMAGE=gcr.io/distroless/cc-debian12 ARG ARCH=$TARGETARCH #################################################################################################### # base #################################################################################################### -FROM alpine:3.17 as base +FROM debian:bullseye as base ARG ARCH -RUN apk update && apk upgrade && \ - apk add ca-certificates && \ - apk --no-cache add tzdata COPY dist/numaflow-linux-${ARCH} /bin/numaflow RUN chmod +x /bin/numaflow +#################################################################################################### +# extension base +#################################################################################################### +FROM rust:1.79-bookworm as extension-base + +RUN curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash + +RUN cargo new serve +# Create a new empty shell project +WORKDIR /serve +RUN cargo new servesink +COPY ./serving/servesink/Cargo.toml ./servesink/ + +RUN cargo new extras/upstreams +COPY 
./serving/extras/upstreams/Cargo.toml ./extras/upstreams/ + +RUN cargo new backoff +COPY ./serving/backoff/Cargo.toml ./backoff/ + +RUN cargo new numaflow-models +COPY ./serving/numaflow-models/Cargo.toml ./numaflow-models/ + +# Copy all Cargo.toml and Cargo.lock files for caching dependencies +COPY ./serving/Cargo.toml ./serving/Cargo.lock ./ + +# Build only the dependencies to cache them +RUN cargo build --release + +# Copy the actual source code files of the main project and the subprojects +COPY ./serving/src ./src +COPY ./serving/servesink/src ./servesink/src +COPY ./serving/extras/upstreams/src ./extras/upstreams/src +COPY ./serving/backoff/src ./backoff/src +COPY ./serving/numaflow-models/src ./numaflow-models/src + +# Build the real binaries +RUN touch src/main.rs servesink/main.rs extras/upstreams/main.rs numaflow-models/main.rs && \ + cargo build --release + #################################################################################################### # numaflow #################################################################################################### ARG BASE_IMAGE FROM ${BASE_IMAGE} as numaflow -COPY --from=base /usr/share/zoneinfo /usr/share/zoneinfo -COPY --from=base /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt + COPY --from=base /bin/numaflow /bin/numaflow COPY ui/build /ui/build + +COPY --from=extension-base /serve/target/release/serve /bin/serve +COPY ./serving/config config + ENTRYPOINT [ "/bin/numaflow" ] #################################################################################################### @@ -40,4 +79,4 @@ RUN chmod +x /bin/e2eapi #################################################################################################### FROM scratch AS e2eapi COPY --from=testbase /bin/e2eapi . 
-ENTRYPOINT ["/e2eapi"] +ENTRYPOINT ["/e2eapi"] \ No newline at end of file diff --git a/Makefile b/Makefile index e4a7b170c5..4e1ee98fa4 100644 --- a/Makefile +++ b/Makefile @@ -5,8 +5,8 @@ CURRENT_DIR=$(shell pwd) DIST_DIR=${CURRENT_DIR}/dist BINARY_NAME:=numaflow DOCKERFILE:=Dockerfile -DEV_BASE_IMAGE:=alpine:3.17 -RELEASE_BASE_IMAGE:=scratch +DEV_BASE_IMAGE:=debian:bookworm +RELEASE_BASE_IMAGE:=gcr.io/distroless/cc-debian12 BUILD_DATE=$(shell date -u +'%Y-%m-%dT%H:%M:%SZ') GIT_COMMIT=$(shell git rev-parse HEAD) @@ -202,7 +202,7 @@ clean: .PHONY: crds crds: - ./hack/crdgen.sh + ./hack/crdgen.sh .PHONY: manifests manifests: crds diff --git a/pkg/apis/numaflow/v1alpha1/serving_source.go b/pkg/apis/numaflow/v1alpha1/serving_source.go index de72021ff2..2041f546f5 100644 --- a/pkg/apis/numaflow/v1alpha1/serving_source.go +++ b/pkg/apis/numaflow/v1alpha1/serving_source.go @@ -29,7 +29,7 @@ type ServingStore struct { // GetTTL returns the TTL for the data in the store. If the TTL is not set, it returns 24 hours. 
func (ss *ServingStore) GetTTL() *metav1.Duration { - if ss == nil { + if ss.TTL == nil { return &metav1.Duration{Duration: DefaultServingTTL} } return ss.TTL diff --git a/pkg/apis/numaflow/v1alpha1/vertex_types.go b/pkg/apis/numaflow/v1alpha1/vertex_types.go index 186fa9c0bf..499924cf7e 100644 --- a/pkg/apis/numaflow/v1alpha1/vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/vertex_types.go @@ -49,6 +49,8 @@ const ( VertexTypeReduceUDF VertexType = "ReduceUDF" ) +const ServingBinary = "/bin/serve" + // +genclient // +kubebuilder:object:root=true // +kubebuilder:resource:shortName=vtx @@ -344,8 +346,9 @@ func (v Vertex) getServingContainer(req GetVertexPodSpecReq) (corev1.Container, servingContainer := corev1.Container{ Name: ServingSourceContainer, Env: req.Env, - Image: "numaserve:0.1", // TODO: use appropriate image - ImagePullPolicy: corev1.PullIfNotPresent, + Image: req.Image, + ImagePullPolicy: req.PullPolicy, + Command: []string{ServingBinary}, // we use the same image, but we execute the extension binary Resources: req.DefaultResources, } diff --git a/pkg/apis/numaflow/v1alpha1/vertex_types_test.go b/pkg/apis/numaflow/v1alpha1/vertex_types_test.go index 6610908836..d0a3134ba7 100644 --- a/pkg/apis/numaflow/v1alpha1/vertex_types_test.go +++ b/pkg/apis/numaflow/v1alpha1/vertex_types_test.go @@ -421,7 +421,9 @@ func TestGetPodSpec(t *testing.T) { t.Run("test serving source", func(t *testing.T) { testObj := testVertex.DeepCopy() testObj.Spec.Source = &Source{ - Serving: &ServingSource{}, + Serving: &ServingSource{ + Store: &ServingStore{}, + }, } s, err := testObj.GetPodSpec(req) assert.NoError(t, err) @@ -446,7 +448,7 @@ func TestGetPodSpec(t *testing.T) { assert.Equal(t, CtrInit, s.InitContainers[0].Name) assert.Equal(t, CtrServing, s.Containers[1].Name) - assert.Equal(t, "numaserve:0.1", s.Containers[1].Image) + assert.Equal(t, "test-f-image", s.Containers[1].Image) assert.Equal(t, corev1.PullIfNotPresent, s.Containers[1].ImagePullPolicy) envNames = 
[]string{} for _, e := range s.Containers[1].Env { diff --git a/pkg/watermark/fetch/processor_manager_test.go b/pkg/watermark/fetch/processor_manager_test.go index 32ab1eca8d..246fad053e 100644 --- a/pkg/watermark/fetch/processor_manager_test.go +++ b/pkg/watermark/fetch/processor_manager_test.go @@ -388,7 +388,7 @@ func TestProcessorManagerWatchForMapWithMultiplePartition(t *testing.T) { t.Fatalf("expected 2 processors, got %d: %s", len(allProcessors), ctx.Err()) } default: - time.Sleep(1 * time.Millisecond) + time.Sleep(10 * time.Millisecond) allProcessors = processorManager.getAllProcessors() } } diff --git a/serving/Cargo.lock b/serving/Cargo.lock new file mode 100644 index 0000000000..e27de24107 --- /dev/null +++ b/serving/Cargo.lock @@ -0,0 +1,3445 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "addr2line" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "ahash" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "anyhow" +version = "1.0.86" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" + +[[package]] +name = "arc-swap" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" + +[[package]] +name = "async-nats" +version = "0.35.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab8df97cb8fc4a884af29ab383e9292ea0939cfcdd7d2a17179086dc6c427e7f" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures", + "memchr", + "nkeys", + "nuid", + "once_cell", + "portable-atomic", + "rand", + "regex", + "ring", + "rustls-native-certs", + "rustls-pemfile 2.1.3", + "rustls-webpki", + "serde", + "serde_json", + "serde_nanos", + "serde_repr", + "thiserror", + "time", + "tokio", + "tokio-rustls", + "tracing", + "tryhard", + "url", +] + +[[package]] +name = "async-stream" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "async-trait" +version = "0.1.81" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "autocfg" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" + +[[package]] +name = "axum" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" +dependencies = [ + "async-trait", + "axum-core", + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.4.1", + "hyper-util", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper 1.0.1", + "tokio", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "axum-core" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper 0.1.2", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "axum-macros" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00c055ee2d014ae5981ce1016374e8213682aa14d9bf40e48ab48b5f3ef20eaa" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "backoff" +version = "0.1.0" +dependencies = [ + "pin-project", + "tokio", +] + 
+[[package]] +name = "backtrace" +version = "0.3.73" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "base64" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "base64ct" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" +dependencies = [ + "serde", +] + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "bumpalo" +version = "3.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = 
"bytes" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +dependencies = [ + "serde", +] + +[[package]] +name = "cc" +version = "1.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26a5c3fd7bfa1ce3897a3a3501d362b2d87b7f2583ebcb4a949ec25911025cbc" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "chrono" +version = "0.4.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" +dependencies = [ + "android-tzdata", + "iana-time-zone", + "js-sys", + "num-traits", + "serde", + "wasm-bindgen", + "windows-targets 0.52.6", +] + +[[package]] +name = "combine" +version = "4.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +dependencies = [ + "bytes", + "futures-core", + "memchr", + "pin-project-lite", + "tokio", + "tokio-util", +] + +[[package]] +name = "config" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7328b20597b53c2454f0b1919720c25c7339051c02b72b7e05409e00b14132be" +dependencies = [ + "async-trait", + "convert_case", + "json5", + "lazy_static", + "nom", + "pathdiff", + "ron", + "rust-ini", + "serde", + "serde_json", + "toml", + "yaml-rust", +] + +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + +[[package]] +name = "const-random" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" +dependencies = [ + "const-random-macro", +] + +[[package]] +name = "const-random-macro" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" +dependencies = [ + "getrandom", + "once_cell", + "tiny-keccak", +] + +[[package]] +name = "convert_case" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" + +[[package]] +name = "cpufeatures" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +dependencies = [ + "libc", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" + +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + 
+[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "curve25519-dalek" +version = "4.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" +dependencies = [ + "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "digest", + "fiat-crypto", + "rustc_version", + "subtle", +] + +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "data-encoding" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" + +[[package]] +name = "der" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" +dependencies = [ + "const-oid", + "pem-rfc7468", + "zeroize", +] + +[[package]] +name = "deranged" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +dependencies = [ + "powerfmt", + "serde", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", +] + +[[package]] +name = "dlv-list" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"442039f5147480ba31067cb00ada1adae6892028e40e45fc5de7b7df6dcc1b5f" +dependencies = [ + "const-random", +] + +[[package]] +name = "ed25519" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" +dependencies = [ + "signature", +] + +[[package]] +name = "ed25519-dalek" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" +dependencies = [ + "curve25519-dalek", + "ed25519", + "sha2", + "signature", + "subtle", +] + +[[package]] +name = "either" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" + +[[package]] +name = "encoding_rs" +version = "0.8.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "errno" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "fastrand" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" + +[[package]] +name = "fiat-crypto" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" + +[[package]] +name = "fixedbitset" +version = "0.4.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] +name = "form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "futures" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" + +[[package]] +name = "futures-executor" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" + +[[package]] +name = "futures-macro" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "futures-sink" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" + +[[package]] +name = "futures-task" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" + +[[package]] +name = "futures-util" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "gimli" +version = "0.29.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" + +[[package]] +name = "h2" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 0.2.12", + "indexmap 2.3.0", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "h2" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http 1.1.0", + "indexmap 2.3.0", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash", +] + +[[package]] +name = "headers" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "322106e6bd0cba2d5ead589ddb8150a13d7c4217cf80d7c4f682ca994ccc6aa9" +dependencies = [ + "base64 0.21.7", + "bytes", + "headers-core", + "http 1.1.0", + "httpdate", + "mime", + "sha1", +] + +[[package]] +name = "headers-core" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"54b4a22553d4242c49fddb9ba998a99962b5cc6f22cb5a3482bec22522403ce4" +dependencies = [ + "http 1.1.0", +] + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hermit-abi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" + +[[package]] +name = "home" +version = "0.5.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "http" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +dependencies = [ + "bytes", + "http 0.2.12", + "pin-project-lite", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http 1.1.0", +] + +[[package]] +name = "http-body-util" +version = "0.1.2" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +dependencies = [ + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "hyper" +version = "0.14.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.3.26", + "http 0.2.12", + "http-body 0.4.6", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2", + "tokio", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "hyper" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "h2 0.4.5", + "http 1.1.0", + "http-body 1.0.1", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-http-proxy" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d06dbdfbacf34d996c6fb540a71a684a7aae9056c71951163af8a8a4c07b9a4" +dependencies = [ + "bytes", + "futures-util", + "headers", + "http 1.1.0", + "hyper 1.4.1", + "hyper-rustls", + "hyper-util", + "pin-project-lite", + "rustls-native-certs", + "tokio", + "tokio-rustls", + "tower-service", +] + +[[package]] +name = "hyper-rustls" 
+version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" +dependencies = [ + "futures-util", + "http 1.1.0", + "hyper 1.4.1", + "hyper-util", + "log", + "rustls", + "rustls-native-certs", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower-service", +] + +[[package]] +name = "hyper-timeout" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" +dependencies = [ + "hyper 1.4.1", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + +[[package]] +name = "hyper-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +dependencies = [ + "bytes", + "hyper 0.14.30", + "native-tls", + "tokio", + "tokio-native-tls", +] + +[[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", + "http-body-util", + "hyper 1.4.1", + "hyper-util", + "native-tls", + "tokio", + "tokio-native-tls", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ab92f4f49ee4fb4f997c784b7a2e0fa70050211e0b6a287f898c3c9785ca956" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "hyper 1.4.1", + "pin-project-lite", + "socket2", + "tokio", + "tower", + "tower-service", + "tracing", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.60" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +dependencies = [ + "android_system_properties", + 
"core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "idna" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", +] + +[[package]] +name = "indexmap" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de3fc2e30ba82dd1b3911c8de1ffc143c74a914a14e99514d7637e3099df5ea0" +dependencies = [ + "equivalent", + "hashbrown 0.14.5", +] + +[[package]] +name = "ipnet" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" + +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" + +[[package]] +name = "js-sys" +version = "0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "json5" 
+version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96b0db21af676c1ce64250b5f40f3ce2cf27e4e47cb91ed91eb6fe9350b430c1" +dependencies = [ + "pest", + "pest_derive", + "serde", +] + +[[package]] +name = "jsonpath-rust" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19d8fe85bd70ff715f31ce8c739194b423d79811a19602115d611a3ec85d6200" +dependencies = [ + "lazy_static", + "once_cell", + "pest", + "pest_derive", + "regex", + "serde_json", + "thiserror", +] + +[[package]] +name = "k8s-openapi" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19501afb943ae5806548bc3ebd7f3374153ca057a38f480ef30adfde5ef09755" +dependencies = [ + "base64 0.22.1", + "chrono", + "serde", + "serde-value", + "serde_json", +] + +[[package]] +name = "kube" +version = "0.93.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0365920075af1a2d23619c1ca801c492f2400157de42627f041a061716e76416" +dependencies = [ + "k8s-openapi", + "kube-client", + "kube-core", +] + +[[package]] +name = "kube-client" +version = "0.93.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d81336eb3a5b10a40c97a5a97ad66622e92bad942ce05ee789edd730aa4f8603" +dependencies = [ + "base64 0.22.1", + "bytes", + "chrono", + "either", + "futures", + "home", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.4.1", + "hyper-http-proxy", + "hyper-rustls", + "hyper-timeout", + "hyper-util", + "jsonpath-rust", + "k8s-openapi", + "kube-core", + "pem", + "rustls", + "rustls-pemfile 2.1.3", + "secrecy", + "serde", + "serde_json", + "serde_yaml", + "thiserror", + "tokio", + "tokio-util", + "tower", + "tower-http", + "tracing", +] + +[[package]] +name = "kube-core" +version = "0.93.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cce373a74d787d439063cdefab0f3672860bd7bac01a38e39019177e764a0fe6" 
+dependencies = [ + "chrono", + "form_urlencoded", + "http 1.1.0", + "k8s-openapi", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "libc" +version = "0.2.155" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" + +[[package]] +name = "linked-hash-map" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" + +[[package]] +name = "linux-raw-sys" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" + +[[package]] +name = "lock_api" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" + +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "metrics" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "884adb57038347dfbaf2d5065887b6cf4312330dc8e94bc30a1a839bd79d3261" +dependencies = [ + "ahash", + "portable-atomic", +] + +[[package]] +name = "metrics-exporter-prometheus" +version = "0.15.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4f0c8427b39666bf970460908b213ec09b3b350f20c0c2eabcbba51704a08e6" +dependencies = [ + "base64 0.22.1", + "indexmap 2.3.0", + "metrics", + "metrics-util", + "quanta", + "thiserror", +] + +[[package]] +name = "metrics-util" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4259040465c955f9f2f1a4a8a16dc46726169bca0f88e8fb2dbeced487c3e828" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", + "hashbrown 0.14.5", + "metrics", + "num_cpus", + "quanta", + "sketches-ddsketch", +] + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "mime_guess" +version = "2.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" +dependencies = [ + "mime", + "unicase", +] + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "miniz_oxide" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" +dependencies = [ + "adler", +] + +[[package]] +name = "mio" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "4569e456d394deccd22ce1c1913e6ea0e54519f577285001215d33557431afe4" +dependencies = [ + "hermit-abi", + "libc", + "wasi", + "windows-sys 0.52.0", +] + +[[package]] +name = "multimap" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" + +[[package]] +name = "native-tls" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" +dependencies = [ + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + +[[package]] +name = "nkeys" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2de02c883c178998da8d0c9816a88ef7ef5c58314dd1585c97a4a5679f3ab337" +dependencies = [ + "data-encoding", + "ed25519", + "ed25519-dalek", + "getrandom", + "log", + "rand", + "signatory", +] + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + +[[package]] +name = "nuid" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc895af95856f929163a0aa20c26a78d26bfdc839f51b9d5aa7a5b79e52b7e83" +dependencies = [ + "rand", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + 
"num-integer", + "num-traits", +] + +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "numaflow" +version = "0.1.0" +source = "git+https://github.com/numaproj/numaflow-rs.git?branch=main#f265a615716ab3ec3adf85e8c24413cc076cd695" +dependencies = [ + "chrono", + "futures-util", + "hyper-util", + "prost", + "prost-types", + "serde", + "serde_json", + "thiserror", + "tokio", + "tokio-stream", + "tokio-util", + "tonic", + "tonic-build", + "tracing", + "uuid", +] + +[[package]] +name = "numaflow-models" +version = "0.0.0-pre" +dependencies = [ + "k8s-openapi", + "kube", + "reqwest 0.11.27", + "serde", + "serde_derive", + "serde_json", + "url", + "uuid", +] + +[[package]] +name = "object" +version = "0.36.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f203fa8daa7bb185f760ae12bd8e097f63d17041dcdcaf675ac54cdf863170e" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" + +[[package]] +name 
= "openssl" +version = "0.10.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" +dependencies = [ + "bitflags 2.6.0", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "openssl-probe" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + +[[package]] +name = "openssl-sys" +version = "0.9.103" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "ordered-float" +version = "2.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" +dependencies = [ + "num-traits", +] + +[[package]] +name = "ordered-multimap" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ed8acf08e98e744e5384c8bc63ceb0364e68a6854187221c18df61c4797690e" +dependencies = [ + "dlv-list", + "hashbrown 0.13.2", +] + +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "parking_lot" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +dependencies = [ + "lock_api", + 
"parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-targets 0.52.6", +] + +[[package]] +name = "pathdiff" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" + +[[package]] +name = "pem" +version = "3.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" +dependencies = [ + "base64 0.22.1", + "serde", +] + +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + +[[package]] +name = "percent-encoding" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + +[[package]] +name = "pest" +version = "2.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd53dff83f26735fdc1ca837098ccf133605d794cdae66acfc2bfac3ec809d95" +dependencies = [ + "memchr", + "thiserror", + "ucd-trie", +] + +[[package]] +name = "pest_derive" +version = "2.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a548d2beca6773b1c244554d36fcf8548a8a58e74156968211567250e48e49a" +dependencies = [ + "pest", + "pest_generator", +] + +[[package]] +name = "pest_generator" +version = "2.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c93a82e8d145725dcbaf44e5ea887c8a869efdcc28706df2d08c69e17077183" +dependencies = [ + "pest", + "pest_meta", + "proc-macro2", + 
"quote", + "syn", +] + +[[package]] +name = "pest_meta" +version = "2.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a941429fea7e08bedec25e4f6785b6ffaacc6b755da98df5ef3e7dcf4a124c4f" +dependencies = [ + "once_cell", + "pest", + "sha2", +] + +[[package]] +name = "petgraph" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" +dependencies = [ + "fixedbitset", + "indexmap 2.3.0", +] + +[[package]] +name = "pin-project" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + +[[package]] +name = "pkg-config" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" + +[[package]] +name = "portable-atomic" +version = "1.7.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "da544ee218f0d287a911e9c99a39a8c9bc8fcad3cb8db5959940044ecfc67265" + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "ppv-lite86" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "prettyplease" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" +dependencies = [ + "proc-macro2", + "syn", +] + +[[package]] +name = "proc-macro2" +version = "1.0.86" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "prost" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13db3d3fde688c61e2446b4d843bc27a7e8af269a69440c0308021dc92333cc" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-build" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5bb182580f71dd070f88d01ce3de9f4da5021db7115d2e1c3605a754153b77c1" +dependencies = [ + "bytes", + "heck 0.5.0", + "itertools", + "log", + "multimap", + "once_cell", + "petgraph", + "prettyplease", + "prost", + "prost-types", + "regex", + "syn", + "tempfile", +] + +[[package]] +name = "prost-derive" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18bec9b0adc4eba778b33684b7ba3e7137789434769ee3ce3930463ef904cfca" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn", +] + 
+[[package]] +name = "prost-types" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cee5168b05f49d4b0ca581206eb14a7b22fafd963efe729ac48eb03266e25cc2" +dependencies = [ + "prost", +] + +[[package]] +name = "quanta" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5167a477619228a0b284fac2674e3c388cba90631d7b7de620e6f1fcd08da5" +dependencies = [ + "crossbeam-utils", + "libc", + "once_cell", + "raw-cpuid", + "wasi", + "web-sys", + "winapi", +] + +[[package]] +name = "quote" +version = "1.0.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "raw-cpuid" +version = "11.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb9ee317cfe3fbd54b36a511efc1edd42e216903c9cd575e686dd68a2ba90d8d" +dependencies = [ + "bitflags 2.6.0", +] + +[[package]] +name = "redis" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e902a69d09078829137b4a5d9d082e0490393537badd7c91a3d69d14639e115f" +dependencies = [ + "arc-swap", + 
"async-trait", + "bytes", + "combine", + "futures", + "futures-util", + "itoa", + "num-bigint", + "percent-encoding", + "pin-project-lite", + "ryu", + "sha1_smol", + "socket2", + "tokio", + "tokio-retry", + "tokio-util", + "url", +] + +[[package]] +name = "redox_syscall" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" +dependencies = [ + "bitflags 2.6.0", +] + +[[package]] +name = "regex" +version = "1.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata 0.4.7", + "regex-syntax 0.8.4", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", +] + +[[package]] +name = "regex-automata" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.8.4", +] + +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" + +[[package]] +name = "reqwest" +version = "0.11.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" +dependencies = [ + "base64 0.21.7", + "bytes", + "encoding_rs", + "futures-core", + 
"futures-util", + "h2 0.3.26", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.30", + "hyper-tls 0.5.0", + "ipnet", + "js-sys", + "log", + "mime", + "mime_guess", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "rustls-pemfile 1.0.4", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper 0.1.2", + "system-configuration", + "tokio", + "tokio-native-tls", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "winreg 0.50.0", +] + +[[package]] +name = "reqwest" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7d6d2a27d57148378eb5e111173f4276ad26340ecc5c49a4a2152167a2d6a37" +dependencies = [ + "base64 0.22.1", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "h2 0.4.5", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.4.1", + "hyper-rustls", + "hyper-tls 0.6.0", + "hyper-util", + "ipnet", + "js-sys", + "log", + "mime", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "rustls-pemfile 2.1.3", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper 1.0.1", + "system-configuration", + "tokio", + "tokio-native-tls", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "winreg 0.52.0", +] + +[[package]] +name = "ring" +version = "0.17.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +dependencies = [ + "cc", + "cfg-if", + "getrandom", + "libc", + "spin", + "untrusted", + "windows-sys 0.52.0", +] + +[[package]] +name = "ron" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" +dependencies = [ + "base64 0.21.7", + "bitflags 2.6.0", + "serde", + "serde_derive", +] + +[[package]] +name = "rust-ini" +version = "0.19.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e2a3bcec1f113553ef1c88aae6c020a369d03d55b58de9869a0908930385091" +dependencies = [ + "cfg-if", + "ordered-multimap", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" + +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver", +] + +[[package]] +name = "rustix" +version = "0.38.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +dependencies = [ + "bitflags 2.6.0", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustls" +version = "0.23.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" +dependencies = [ + "log", + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-native-certs" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a88d6d420651b496bdd98684116959239430022a115c1240e6c3993be0b15fba" +dependencies = [ + "openssl-probe", + "rustls-pemfile 2.1.3", + "rustls-pki-types", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +dependencies = [ + "base64 0.21.7", +] + +[[package]] +name = "rustls-pemfile" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" +dependencies = [ + "base64 0.22.1", + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" + +[[package]] +name = "rustls-webpki" +version = "0.102.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e6b52d4fda176fd835fdc55a835d4a89b8499cad995885a21149d5ad62f852e" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" + +[[package]] +name = "ryu" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" + +[[package]] +name = "schannel" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "secrecy" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" +dependencies = [ + "serde", + "zeroize", +] + +[[package]] +name = "security-framework" +version = "2.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +dependencies = [ + "bitflags 2.6.0", + "core-foundation", + "core-foundation-sys", + "libc", + 
"security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "semver" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" + +[[package]] +name = "serde" +version = "1.0.204" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde-value" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" +dependencies = [ + "ordered-float", + "serde", +] + +[[package]] +name = "serde_derive" +version = "1.0.204" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.122" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "784b6203951c57ff748476b126ccb5e8e2959a5c19e5c617ab1956be3dbc68da" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "serde_nanos" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a93142f0367a4cc53ae0fead1bcda39e85beccfad3dcd717656cacab94b12985" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_path_to_error" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" +dependencies = [ + "itoa", + "serde", +] + 
+[[package]] +name = "serde_repr" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_spanned" +version = "0.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_yaml" +version = "0.9.34+deprecated" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" +dependencies = [ + "indexmap 2.3.0", + "itoa", + "ryu", + "serde", + "unsafe-libyaml", +] + +[[package]] +name = "serve" +version = "0.1.0" +dependencies = [ + "async-nats", + "axum", + "axum-macros", + "backoff", + "base64 0.22.1", + "chrono", + "config", + "hyper-util", + "metrics", + "metrics-exporter-prometheus", + "redis", + "serde", + "serde_json", + "tempfile", + "tokio", + "tower", + "tower-http", + "tracing", + "tracing-subscriber", + "trait-variant", + "uuid", +] + +[[package]] +name = "servesink" +version = "0.1.0" +dependencies = [ + "numaflow", + "reqwest 0.12.5", + "tokio", + "tonic", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "sha1" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sha1_smol" +version = "1.0.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbfa15b3dddfee50a0fff136974b3e1bde555604ba463834a7eb7deb6417705d" + +[[package]] +name = "sha2" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "signal-hook-registry" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" +dependencies = [ + "libc", +] + +[[package]] +name = "signatory" +version = "0.27.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1e303f8205714074f6068773f0e29527e0453937fe837c9717d066635b65f31" +dependencies = [ + "pkcs8", + "rand_core", + "signature", + "zeroize", +] + +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "digest", + "rand_core", +] + +[[package]] +name = "sketches-ddsketch" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85636c14b73d81f541e525f585c0a2109e6744e1565b5c1668e31c70c10ed65c" + +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] + +[[package]] +name = "smallvec" +version = "1.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" + +[[package]] +name = "socket2" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "2.0.72" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + +[[package]] +name = "sync_wrapper" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" + +[[package]] +name = "system-configuration" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.5.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "tempfile" +version = "3.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fcd239983515c23a32fb82099f97d0b11b8c72f654ed659363a95c3dad7a53" +dependencies = [ + "cfg-if", + "fastrand", + "once_cell", + "rustix", + "windows-sys 0.52.0", +] + +[[package]] +name = "thiserror" +version = "1.0.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thread_local" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +dependencies = [ + "cfg-if", + "once_cell", +] + +[[package]] +name = "time" +version = "0.3.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +dependencies = [ + "deranged", + "itoa", + "num-conv", + "powerfmt", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" + +[[package]] +name = "time-macros" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +dependencies = [ + "num-conv", + 
"time-core", +] + +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + +[[package]] +name = "tinyvec" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.39.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daa4fb1bc778bd6f04cbfc4bb2d06a7396a8f299dc33ea1900cedaa316f467b1" +dependencies = [ + "backtrace", + "bytes", + "libc", + "mio", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "windows-sys 0.52.0", +] + +[[package]] +name = "tokio-macros" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + +[[package]] +name = "tokio-retry" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f57eb36ecbe0fc510036adff84824dd3c24bb781e21bfa67b69d556aa85214f" +dependencies = [ + "pin-project", + "rand", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" +dependencies = [ + "rustls", + "rustls-pki-types", + "tokio", +] + +[[package]] +name = "tokio-stream" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "toml" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.22.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" +dependencies = [ + "indexmap 2.3.0", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + +[[package]] +name = "tonic" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38659f4a91aba8598d27821589f5db7dddd94601e7a01b1e485a50e5484c7401" +dependencies = [ + "async-stream", + "async-trait", + "axum", + "base64 0.22.1", + "bytes", + "h2 0.4.5", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.4.1", + "hyper-timeout", + "hyper-util", + "percent-encoding", + "pin-project", + "prost", + 
"socket2", + "tokio", + "tokio-stream", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tonic-build" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "568392c5a2bd0020723e3f387891176aabafe36fd9fcd074ad309dfa0c8eb964" +dependencies = [ + "prettyplease", + "proc-macro2", + "prost-build", + "quote", + "syn", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 1.9.3", + "pin-project", + "pin-project-lite", + "rand", + "slab", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" +dependencies = [ + "base64 0.21.7", + "bitflags 2.6.0", + "bytes", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "mime", + "pin-project-lite", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" + +[[package]] +name = "tower-service" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" + +[[package]] +name = "tracing" +version = "0.1.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +dependencies = [ + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.27" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", +] + +[[package]] +name = "trait-variant" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70977707304198400eb4835a78f6a9f928bf41bba420deb8fdb175cd965d77a7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "tryhard" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c9f0a709784e86923586cff0d872dba54cd2d2e116b3bc57587d15737cfce9d" +dependencies = [ + "futures", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "typenum" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" + +[[package]] +name = "ucd-trie" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" + +[[package]] +name = "unicase" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" +dependencies = [ + "version_check", +] + +[[package]] +name = "unicode-bidi" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "unicode-normalization" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-segmentation" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" + +[[package]] +name = "unsafe-libyaml" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "upstreams" +version = "0.1.0" +dependencies = [ + "axum", + "axum-macros", + "http-body-util", + "serde", + "serde_json", + "tokio", + "tower", + "tower-http", + "tracing", + "tracing-subscriber", +] + 
+[[package]] +name = "url" +version = "2.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", +] + +[[package]] +name = "uuid" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" +dependencies = [ + "getrandom", + "serde", +] + +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" + +[[package]] +name = "web-sys" +version = "0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-core" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = 
"windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "winnow" +version = "0.6.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" +dependencies = [ + "memchr", +] + +[[package]] +name = "winreg" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" +dependencies = [ + "cfg-if", + "windows-sys 0.48.0", +] + +[[package]] +name = "winreg" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" +dependencies = [ + "cfg-if", + "windows-sys 0.48.0", +] + +[[package]] +name = "yaml-rust" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +dependencies = [ + "linked-hash-map", +] + +[[package]] +name = "zerocopy" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +dependencies = [ + "byteorder", + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zeroize" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" diff --git a/serving/Cargo.toml b/serving/Cargo.toml index 6353aac742..10138b1386 100644 --- a/serving/Cargo.toml +++ b/serving/Cargo.toml @@ -25,10 +25,11 @@ tracing = "0.1.40" tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } uuid = { version = "1.10.0", features = ["v4"] } tempfile = "3.10.1" -redis = { version = "0.25.3", features = ["tokio-comp", "aio", "connection-manager"] } +redis = { version = "0.26.0", features = ["tokio-comp", "aio", "connection-manager"] } config = "0.14.0" trait-variant = "0.1.2" chrono = { version = "0.4", features = ["serde"] } # intern backoff = { path = "backoff" } base64 = "0.22.1" + diff --git a/serving/Dockerfile b/serving/Dockerfile index d5e9b8f803..863b999a81 100644 --- a/serving/Dockerfile +++ b/serving/Dockerfile @@ -1,44 +1,46 @@ -FROM rust:1.79-bookworm as build +# Use multi-stage builds to keep the final image small +# Use an official Rust image for the build stage +FROM rust:1.79-bookworm as builder -# For faster/easier installation of Rust binaries RUN curl -L --proto '=https' --tlsv1.2 -sSf 
https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash -RUN apt-get update \ - && apt-get install -y protobuf-compiler - -# Compile and cache our dependencies in a layer -RUN cargo new /serve +RUN cargo new serve +# Create a new empty shell project WORKDIR /serve -COPY Cargo.toml . +RUN cargo new servesink +COPY ./servesink/Cargo.toml ./servesink/ RUN cargo new extras/upstreams -COPY extras/upstreams/Cargo.toml extras/upstreams/Cargo.toml - -RUN cargo new servesink -COPY servesink/Cargo.toml servesink/Cargo.toml +COPY ./extras/upstreams/Cargo.toml ./extras/upstreams/ RUN cargo new backoff -COPY backoff/Cargo.toml backoff/Cargo.toml +COPY ./backoff/Cargo.toml ./backoff/Cargo.toml -RUN cargo build --release - -COPY ./ /serve -# update timestamps to force a new build -RUN touch src/main.rs servesink/src/main.rs extras/upstreams/src/main.rs +RUN cargo new numaflow-models +COPY ./numaflow-models/Cargo.toml ./numaflow-models/ -RUN --mount=type=cache,target=/usr/local/cargo/registry cargo build --release +# Copy all Cargo.toml and Cargo.lock files for caching dependencies +COPY ./Cargo.toml ./Cargo.lock ./ -### Final +# Build only the dependencies to cache them +RUN cargo build --release -FROM debian:bookworm +# Copy the actual source code files of the main project and the subprojects +COPY ./src ./src +COPY ./servesink/src ./servesink/src +COPY ./extras/upstreams/src ./extras/upstreams/src +COPY ./backoff/src ./backoff/src +COPY ./numaflow-models/src ./numaflow-models/src -USER root +# Build the real binaries +RUN touch src/main.rs servesink/main.rs extras/upstreams/main.rs numaflow-models/main.rs && \ + cargo build --release -RUN apt-get update \ - && apt-get install -y openssl +# Use a lightweight image for the runtime +FROM gcr.io/distroless/cc-debian12 as numaflow-ext -COPY --from=build /serve/target/release/serve . +COPY --from=builder /serve/target/release/serve . 
COPY ./config config -ENTRYPOINT ["./serve"] +ENTRYPOINT ["./serve"] \ No newline at end of file diff --git a/serving/Makefile b/serving/Makefile new file mode 100644 index 0000000000..761882a4ee --- /dev/null +++ b/serving/Makefile @@ -0,0 +1,15 @@ +.PHONY: build +build: + cargo build --release + +.PHONY: test +test: + cargo test + +.PHONY: all-tests +all-tests: + cargo test --all-features + +.PHONY: clean +clean: + cargo clean \ No newline at end of file diff --git a/serving/src/app/callback/store/redisstore.rs b/serving/src/app/callback/store/redisstore.rs index 73ed36c5a1..dea8f0b41d 100644 --- a/serving/src/app/callback/store/redisstore.rs +++ b/serving/src/app/callback/store/redisstore.rs @@ -188,7 +188,7 @@ impl super::Store for RedisConnection { // Check if the Redis connection is healthy async fn ready(&mut self) -> bool { let mut conn = self.conn_manager.clone(); - match redis::cmd("PING").query_async::<_, String>(&mut conn).await { + match redis::cmd("PING").query_async::(&mut conn).await { Ok(response) => response == "PONG", Err(_) => false, } diff --git a/serving/src/main.rs b/serving/src/main.rs index 0d3b4a6bc7..6e3beea02d 100644 --- a/serving/src/main.rs +++ b/serving/src/main.rs @@ -50,4 +50,4 @@ async fn flatten(handle: tokio::task::JoinHandle>) -> Result { Ok(Err(err)) => Err(err), Err(err) => Err(Error::Other(format!("Spawning the server: {err:?}"))), } -} +} \ No newline at end of file diff --git a/test/fixtures/e2e_suite.go b/test/fixtures/e2e_suite.go index 78503bd317..3012be8754 100644 --- a/test/fixtures/e2e_suite.go +++ b/test/fixtures/e2e_suite.go @@ -140,6 +140,7 @@ func (s *E2ESuite) TearDownSuite() { Wait(3 * time.Second). Expect(). 
ISBSvcDeleted(defaultTimeout) + s.T().Log("ISB svc is deleted") deleteCMD := fmt.Sprintf("kubectl delete -k ../../config/apps/redis -n %s --ignore-not-found=true", Namespace) s.Given().When().Exec("sh", []string{"-c", deleteCMD}, OutputRegexp(`service "redis" deleted`)) From 1dbc091071ed3295dc5399a66a94ea22d424adc8 Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Thu, 8 Aug 2024 14:02:21 -0700 Subject: [PATCH 07/23] feat: introducing MonoVertex (#1911) Signed-off-by: Derek Wang --- api/json-schema/schema.json | 276 + api/openapi-spec/swagger.json | 276 + cmd/commands/mvtx_daemon_server.go | 69 + cmd/commands/root.go | 1 + config/advanced-install/minimal-crds.yaml | 56 + .../namespaced-controller-wo-crds.yaml | 4 + .../namespaced-numaflow-server.yaml | 4 + config/advanced-install/numaflow-server.yaml | 4 + config/base/crds/full/kustomization.yaml | 1 + .../numaflow.numaproj.io_monovertices.yaml | 5539 ++++++ config/base/crds/minimal/kustomization.yaml | 1 + .../numaflow.numaproj.io_monovertices.yaml | 55 + .../numaflow-aggregate-to-admin.yaml | 4 + .../numaflow-aggregate-to-edit.yaml | 4 + .../numaflow-aggregate-to-view.yaml | 4 + .../numaflow-cluster-role.yaml | 4 + .../numaflow-server-cluster-role.yaml | 4 + config/install.yaml | 5559 ++++++ config/namespace-install.yaml | 5547 ++++++ .../controller-manager/numaflow-role.yaml | 4 + .../numaflow-server/numaflow-server-role.yaml | 4 + docs/APIs.md | 1322 +- examples/21-simple-mono-vertex.yaml | 13 + hack/update-codegen.sh | 2 +- pkg/apis/numaflow/v1alpha1/const.go | 16 +- pkg/apis/numaflow/v1alpha1/generated.pb.go | 16575 +++++++++------- pkg/apis/numaflow/v1alpha1/generated.proto | 123 + pkg/apis/numaflow/v1alpha1/get_spec_req.go | 14 + .../numaflow/v1alpha1/mono_vertex_types.go | 526 + .../numaflow/v1alpha1/openapi_generated.go | 697 +- pkg/apis/numaflow/v1alpha1/pipeline_types.go | 2 +- pkg/apis/numaflow/v1alpha1/register.go | 16 +- pkg/apis/numaflow/v1alpha1/scale.go | 134 + 
pkg/apis/numaflow/v1alpha1/scale_test.go | 66 + pkg/apis/numaflow/v1alpha1/vertex_types.go | 125 +- .../numaflow/v1alpha1/vertex_types_test.go | 43 +- .../v1alpha1/zz_generated.deepcopy.go | 223 + .../numaflow/v1alpha1/fake/fake_monovertex.go | 141 + .../v1alpha1/fake/fake_numaflow_client.go | 4 + .../numaflow/v1alpha1/generated_expansion.go | 2 + .../typed/numaflow/v1alpha1/monovertex.go | 195 + .../numaflow/v1alpha1/numaflow_client.go | 5 + .../informers/externalversions/generic.go | 2 + .../numaflow/v1alpha1/interface.go | 7 + .../numaflow/v1alpha1/monovertex.go | 90 + .../numaflow/v1alpha1/expansion_generated.go | 8 + .../listers/numaflow/v1alpha1/monovertex.go | 99 + pkg/metrics/metrics.go | 1 + pkg/mvtxdaemon/server/daemon_server.go | 165 + pkg/mvtxdaemon/server/metrics.go | 32 + pkg/reconciler/cmd/start.go | 43 +- pkg/reconciler/monovertex/controller.go | 460 + pkg/reconciler/monovertex/controller_test.go | 17 + pkg/reconciler/pipeline/controller.go | 3 +- pkg/reconciler/util.go | 3 + pkg/reconciler/vertex/controller.go | 2 +- .../get_mono_vertex_daemon_deployment_req.rs | 37 + .../models/get_mono_vertex_pod_spec_req.rs | 37 + serving/numaflow-models/src/models/mod.rs | 14 + .../numaflow-models/src/models/mono_vertex.rs | 37 + .../src/models/mono_vertex_limits.rs | 27 + .../src/models/mono_vertex_list.rs | 34 + .../src/models/mono_vertex_spec.rs | 103 + .../src/models/mono_vertex_status.rs | 48 + 64 files changed, 31455 insertions(+), 7478 deletions(-) create mode 100644 cmd/commands/mvtx_daemon_server.go create mode 100644 config/base/crds/full/numaflow.numaproj.io_monovertices.yaml create mode 100644 config/base/crds/minimal/numaflow.numaproj.io_monovertices.yaml create mode 100644 examples/21-simple-mono-vertex.yaml create mode 100644 pkg/apis/numaflow/v1alpha1/mono_vertex_types.go create mode 100644 pkg/apis/numaflow/v1alpha1/scale.go create mode 100644 pkg/apis/numaflow/v1alpha1/scale_test.go create mode 100644 
pkg/client/clientset/versioned/typed/numaflow/v1alpha1/fake/fake_monovertex.go create mode 100644 pkg/client/clientset/versioned/typed/numaflow/v1alpha1/monovertex.go create mode 100644 pkg/client/informers/externalversions/numaflow/v1alpha1/monovertex.go create mode 100644 pkg/client/listers/numaflow/v1alpha1/monovertex.go create mode 100644 pkg/mvtxdaemon/server/daemon_server.go create mode 100644 pkg/mvtxdaemon/server/metrics.go create mode 100644 pkg/reconciler/monovertex/controller.go create mode 100644 pkg/reconciler/monovertex/controller_test.go create mode 100644 serving/numaflow-models/src/models/get_mono_vertex_daemon_deployment_req.rs create mode 100644 serving/numaflow-models/src/models/get_mono_vertex_pod_spec_req.rs create mode 100644 serving/numaflow-models/src/models/mono_vertex.rs create mode 100644 serving/numaflow-models/src/models/mono_vertex_limits.rs create mode 100644 serving/numaflow-models/src/models/mono_vertex_list.rs create mode 100644 serving/numaflow-models/src/models/mono_vertex_spec.rs create mode 100644 serving/numaflow-models/src/models/mono_vertex_status.rs diff --git a/api/json-schema/schema.json b/api/json-schema/schema.json index 410d4ad9db..9a119f6a1d 100644 --- a/api/json-schema/schema.json +++ b/api/json-schema/schema.json @@ -18311,6 +18311,58 @@ ], "type": "object" }, + "io.numaproj.numaflow.v1alpha1.GetMonoVertexDaemonDeploymentReq": { + "properties": { + "DefaultResources": { + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements" + }, + "Env": { + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.EnvVar" + }, + "type": "array" + }, + "Image": { + "type": "string" + }, + "PullPolicy": { + "type": "string" + } + }, + "required": [ + "Image", + "PullPolicy", + "Env", + "DefaultResources" + ], + "type": "object" + }, + "io.numaproj.numaflow.v1alpha1.GetMonoVertexPodSpecReq": { + "properties": { + "DefaultResources": { + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements" + }, + "Env": { + "items": 
{ + "$ref": "#/definitions/io.k8s.api.core.v1.EnvVar" + }, + "type": "array" + }, + "Image": { + "type": "string" + }, + "PullPolicy": { + "type": "string" + } + }, + "required": [ + "Image", + "PullPolicy", + "Env", + "DefaultResources" + ], + "type": "object" + }, "io.numaproj.numaflow.v1alpha1.GetRedisServiceSpecReq": { "properties": { "Labels": { @@ -18979,6 +19031,230 @@ }, "type": "object" }, + "io.numaproj.numaflow.v1alpha1.MonoVertex": { + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.MonoVertexSpec" + }, + "status": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.MonoVertexStatus" + } + }, + "required": [ + "spec" + ], + "type": "object" + }, + "io.numaproj.numaflow.v1alpha1.MonoVertexLimits": { + "properties": { + "readBatchSize": { + "description": "Read batch size from the source.", + "format": "int64", + "type": "integer" + }, + "readTimeout": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration", + "description": "Read timeout duration from the source." 
+ } + }, + "type": "object" + }, + "io.numaproj.numaflow.v1alpha1.MonoVertexList": { + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "items": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.MonoVertex" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "required": [ + "items" + ], + "type": "object" + }, + "io.numaproj.numaflow.v1alpha1.MonoVertexSpec": { + "properties": { + "affinity": { + "$ref": "#/definitions/io.k8s.api.core.v1.Affinity", + "description": "The pod's scheduling constraints More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/" + }, + "automountServiceAccountToken": { + "description": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.", + "type": "boolean" + }, + "containerTemplate": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.ContainerTemplate", + "description": "Container template for the main numa container." + }, + "daemonTemplate": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.DaemonTemplate", + "description": "Template for the daemon service deployment." 
+ }, + "dnsConfig": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodDNSConfig", + "description": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy." + }, + "dnsPolicy": { + "description": "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", + "type": "string" + }, + "imagePullSecrets": { + "description": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "initContainers": { + "description": "List of customized init containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Container" + }, + "type": "array" + }, + "limits": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.MonoVertexLimits", + "description": "Limits define the limitations such as buffer read batch size for all the vertices of a pipeline, will override pipeline level settings" + }, + "metadata": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.Metadata", + "description": "Metadata sets the pods's metadata, i.e. 
annotations and labels" + }, + "nodeSelector": { + "additionalProperties": { + "type": "string" + }, + "description": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + "type": "object" + }, + "priority": { + "description": "The priority value. Various system components use this field to find the priority of the Redis pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", + "format": "int32", + "type": "integer" + }, + "priorityClassName": { + "description": "If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", + "type": "string" + }, + "replicas": { + "format": "int32", + "type": "integer" + }, + "runtimeClassName": { + "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. 
More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", + "type": "string" + }, + "scale": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.Scale", + "description": "Settings for autoscaling" + }, + "securityContext": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodSecurityContext", + "description": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field." + }, + "serviceAccountName": { + "description": "ServiceAccountName applied to the pod", + "type": "string" + }, + "sidecars": { + "description": "List of customized sidecar containers belonging to the pod.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Container" + }, + "type": "array" + }, + "sink": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.Sink" + }, + "source": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.Source" + }, + "tolerations": { + "description": "If specified, the pod's tolerations.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Toleration" + }, + "type": "array" + }, + "volumes": { + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Volume" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + } + }, + "type": "object" + }, + "io.numaproj.numaflow.v1alpha1.MonoVertexStatus": { + "properties": { + "conditions": { + "description": "Conditions are the latest available observations of a resource's current state.", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "lastScaledAt": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "lastUpdated": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "type": "string" + }, + "observedGeneration": { + 
"format": "int64", + "type": "integer" + }, + "phase": { + "type": "string" + }, + "reason": { + "type": "string" + }, + "replicas": { + "format": "int64", + "type": "integer" + }, + "selector": { + "type": "string" + } + }, + "required": [ + "replicas" + ], + "type": "object" + }, "io.numaproj.numaflow.v1alpha1.NativeRedis": { "properties": { "affinity": { diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 812ab63a65..e0be1c1a7f 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -18315,6 +18315,58 @@ } } }, + "io.numaproj.numaflow.v1alpha1.GetMonoVertexDaemonDeploymentReq": { + "type": "object", + "required": [ + "Image", + "PullPolicy", + "Env", + "DefaultResources" + ], + "properties": { + "DefaultResources": { + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements" + }, + "Env": { + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.EnvVar" + } + }, + "Image": { + "type": "string" + }, + "PullPolicy": { + "type": "string" + } + } + }, + "io.numaproj.numaflow.v1alpha1.GetMonoVertexPodSpecReq": { + "type": "object", + "required": [ + "Image", + "PullPolicy", + "Env", + "DefaultResources" + ], + "properties": { + "DefaultResources": { + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements" + }, + "Env": { + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.EnvVar" + } + }, + "Image": { + "type": "string" + }, + "PullPolicy": { + "type": "string" + } + } + }, "io.numaproj.numaflow.v1alpha1.GetRedisServiceSpecReq": { "type": "object", "required": [ @@ -18974,6 +19026,230 @@ } } }, + "io.numaproj.numaflow.v1alpha1.MonoVertex": { + "type": "object", + "required": [ + "spec" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.MonoVertexSpec" + }, + "status": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.MonoVertexStatus" + } + } + }, + "io.numaproj.numaflow.v1alpha1.MonoVertexLimits": { + "type": "object", + "properties": { + "readBatchSize": { + "description": "Read batch size from the source.", + "type": "integer", + "format": "int64" + }, + "readTimeout": { + "description": "Read timeout duration from the source.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration" + } + } + }, + "io.numaproj.numaflow.v1alpha1.MonoVertexList": { + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.MonoVertex" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + } + }, + "io.numaproj.numaflow.v1alpha1.MonoVertexSpec": { + "type": "object", + "properties": { + "affinity": { + "description": "The pod's scheduling constraints More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/", + "$ref": "#/definitions/io.k8s.api.core.v1.Affinity" + }, + "automountServiceAccountToken": { + "description": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.", + "type": "boolean" + }, + "containerTemplate": { + "description": "Container template for the main numa container.", + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.ContainerTemplate" + }, + "daemonTemplate": { + "description": "Template for the daemon service deployment.", + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.DaemonTemplate" + }, + "dnsConfig": { + "description": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", + "$ref": "#/definitions/io.k8s.api.core.v1.PodDNSConfig" + }, + "dnsPolicy": { + "description": "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", + "type": "string" + }, + "imagePullSecrets": { + "description": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. 
For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" + }, + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "initContainers": { + "description": "List of customized init containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Container" + } + }, + "limits": { + "description": "Limits define the limitations such as buffer read batch size for all the vertices of a pipeline, will override pipeline level settings", + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.MonoVertexLimits" + }, + "metadata": { + "description": "Metadata sets the pods's metadata, i.e. annotations and labels", + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.Metadata" + }, + "nodeSelector": { + "description": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "priority": { + "description": "The priority value. Various system components use this field to find the priority of the Redis pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", + "type": "integer", + "format": "int32" + }, + "priorityClassName": { + "description": "If specified, indicates the Redis pod's priority. 
\"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", + "type": "string" + }, + "replicas": { + "type": "integer", + "format": "int32" + }, + "runtimeClassName": { + "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", + "type": "string" + }, + "scale": { + "description": "Settings for autoscaling", + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.Scale" + }, + "securityContext": { + "description": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. 
See type description for default values of each field.", + "$ref": "#/definitions/io.k8s.api.core.v1.PodSecurityContext" + }, + "serviceAccountName": { + "description": "ServiceAccountName applied to the pod", + "type": "string" + }, + "sidecars": { + "description": "List of customized sidecar containers belonging to the pod.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Container" + } + }, + "sink": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.Sink" + }, + "source": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.Source" + }, + "tolerations": { + "description": "If specified, the pod's tolerations.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Toleration" + } + }, + "volumes": { + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Volume" + }, + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + } + } + }, + "io.numaproj.numaflow.v1alpha1.MonoVertexStatus": { + "type": "object", + "required": [ + "replicas" + ], + "properties": { + "conditions": { + "description": "Conditions are the latest available observations of a resource's current state.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" + }, + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "lastScaledAt": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "lastUpdated": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "type": "string" + }, + "observedGeneration": { + "type": "integer", + "format": "int64" + }, + "phase": { + "type": "string" + }, + "reason": { + "type": "string" + }, + "replicas": { + "type": "integer", + "format": "int64" + }, + "selector": { + "type": "string" + } + } + }, "io.numaproj.numaflow.v1alpha1.NativeRedis": { "type": "object", "properties": { diff --git 
a/cmd/commands/mvtx_daemon_server.go b/cmd/commands/mvtx_daemon_server.go new file mode 100644 index 0000000000..769da50281 --- /dev/null +++ b/cmd/commands/mvtx_daemon_server.go @@ -0,0 +1,69 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package commands + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "os" + + "github.com/spf13/cobra" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" + + "github.com/numaproj/numaflow" + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" + "github.com/numaproj/numaflow/pkg/mvtxdaemon/server" + "github.com/numaproj/numaflow/pkg/shared/logging" +) + +func NewMonoVtxDaemonServerCommand() *cobra.Command { + + command := &cobra.Command{ + Use: "mvtx-daemon-server", + Short: "Start the mono vertex daemon server", + RunE: func(cmd *cobra.Command, args []string) error { + monoVtx, err := decodeMonoVtx() + if err != nil { + return fmt.Errorf("failed to decode the mono vertex spec: %v", err) + } + logger := logging.NewLogger().Named("mvtx-daemon-server").With("mvtx", monoVtx.Name) + logger.Infow("Starting mono vertex daemon server", "version", numaflow.GetVersion()) + ctx := logging.WithLogger(signals.SetupSignalHandler(), logger) + server := server.NewDaemonServer(monoVtx) + return server.Run(ctx) + }, + } + return command +} + +func decodeMonoVtx() (*v1alpha1.MonoVertex, error) { + encodedMonoVtxSpec, defined := os.LookupEnv(v1alpha1.EnvMonoVertexObject) + if !defined { + return nil, 
fmt.Errorf("environment %q is not defined", v1alpha1.EnvMonoVertexObject) + } + decodedMonoVtxBytes, err := base64.StdEncoding.DecodeString(encodedMonoVtxSpec) + + if err != nil { + return nil, fmt.Errorf("failed to decode the encoded MonoVertex object, error: %w", err) + } + monoVtx := &v1alpha1.MonoVertex{} + if err = json.Unmarshal(decodedMonoVtxBytes, monoVtx); err != nil { + return nil, fmt.Errorf("failed to unmarshal the MonoVertex object, error: %w", err) + } + return monoVtx, nil +} diff --git a/cmd/commands/root.go b/cmd/commands/root.go index 20fda6ce1b..27931d2ff2 100644 --- a/cmd/commands/root.go +++ b/cmd/commands/root.go @@ -51,4 +51,5 @@ func init() { rootCmd.AddCommand(NewSideInputsManagerCommand()) rootCmd.AddCommand(NewSideInputsSynchronizerCommand()) rootCmd.AddCommand(NewDexServerInitCommand()) + rootCmd.AddCommand(NewMonoVtxDaemonServerCommand()) } diff --git a/config/advanced-install/minimal-crds.yaml b/config/advanced-install/minimal-crds.yaml index 1d72f226c9..f06a75b3f4 100644 --- a/config/advanced-install/minimal-crds.yaml +++ b/config/advanced-install/minimal-crds.yaml @@ -52,6 +52,62 @@ spec: --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition +metadata: + name: monovertices.numaflow.numaproj.io +spec: + group: numaflow.numaproj.io + names: + kind: MonoVertex + listKind: MonoVertexList + plural: monovertices + shortNames: + - mvtx + singular: monovertex + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .status.reason + name: Reason + type: string + - jsonPath: .status.message + name: Message + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-preserve-unknown-fields: true + status: + type: object + 
x-kubernetes-preserve-unknown-fields: true + required: + - spec + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition metadata: name: pipelines.numaflow.numaproj.io spec: diff --git a/config/advanced-install/namespaced-controller-wo-crds.yaml b/config/advanced-install/namespaced-controller-wo-crds.yaml index 493e136126..20ca3f2913 100644 --- a/config/advanced-install/namespaced-controller-wo-crds.yaml +++ b/config/advanced-install/namespaced-controller-wo-crds.yaml @@ -25,6 +25,10 @@ rules: - vertices/finalizers - vertices/status - vertices/scale + - monovertices + - monovertices/finalizers + - monovertices/status + - monovertices/scale verbs: - create - delete diff --git a/config/advanced-install/namespaced-numaflow-server.yaml b/config/advanced-install/namespaced-numaflow-server.yaml index f8da8ffffe..5262b8056f 100644 --- a/config/advanced-install/namespaced-numaflow-server.yaml +++ b/config/advanced-install/namespaced-numaflow-server.yaml @@ -25,6 +25,10 @@ rules: - vertices/finalizers - vertices/status - vertices/scale + - monovertices + - monovertices/finalizers + - monovertices/status + - monovertices/scale verbs: - create - delete diff --git a/config/advanced-install/numaflow-server.yaml b/config/advanced-install/numaflow-server.yaml index b92bc89f69..5e7982d7a3 100644 --- a/config/advanced-install/numaflow-server.yaml +++ b/config/advanced-install/numaflow-server.yaml @@ -46,6 +46,10 @@ rules: - vertices/finalizers - vertices/status - vertices/scale + - monovertices + - monovertices/finalizers + - monovertices/status + - monovertices/scale verbs: - create - delete diff --git a/config/base/crds/full/kustomization.yaml b/config/base/crds/full/kustomization.yaml index 89f945b5f7..6a183196eb 100644 --- a/config/base/crds/full/kustomization.yaml +++ 
b/config/base/crds/full/kustomization.yaml @@ -5,3 +5,4 @@ resources: - numaflow.numaproj.io_interstepbufferservices.yaml - numaflow.numaproj.io_pipelines.yaml - numaflow.numaproj.io_vertices.yaml + - numaflow.numaproj.io_monovertices.yaml diff --git a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml new file mode 100644 index 0000000000..15d016ba47 --- /dev/null +++ b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml @@ -0,0 +1,5539 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: monovertices.numaflow.numaproj.io +spec: + group: numaflow.numaproj.io + names: + kind: MonoVertex + listKind: MonoVertexList + plural: monovertices + shortNames: + - mvtx + singular: monovertex + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .status.reason + name: Reason + type: string + - jsonPath: .status.message + name: Message + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + format: int32 + type: integer + required: + 
- preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + 
key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + 
format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + automountServiceAccountToken: + type: boolean + containerTemplate: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: 
+ type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + imagePullPolicy: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: 
+ type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + daemonTemplate: + properties: + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: 
+ - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + automountServiceAccountToken: + type: boolean + 
containerTemplate: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + imagePullPolicy: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + 
securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + dnsConfig: + properties: + nameservers: + items: + type: string + type: array + options: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + searches: + items: + type: string + type: array + type: object + dnsPolicy: + type: string + imagePullSecrets: + items: + properties: + name: + type: string + type: object + type: array + initContainerTemplate: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + 
type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + imagePullPolicy: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + 
windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + nodeSelector: + additionalProperties: + type: string + type: object + priority: + format: int32 + type: integer + priorityClassName: + type: string + replicas: + format: int32 + type: integer + runtimeClassName: + type: string + securityContext: + properties: + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + serviceAccountName: + type: string + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + type: object + dnsConfig: + properties: + nameservers: + items: + type: string + type: array + options: + items: + properties: + name: + type: string + value: + type: string + type: object + type: 
array + searches: + items: + type: string + type: array + type: object + dnsPolicy: + type: string + imagePullSecrets: + items: + properties: + name: + type: string + type: object + type: array + initContainers: + items: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + 
x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + 
properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: 
atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + 
type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + type: string + required: + - name + type: object + type: array + limits: + properties: + readBatchSize: + default: 500 + format: int64 + type: integer + readTimeout: + default: 1s + type: string + type: object + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + nodeSelector: + additionalProperties: + type: string + type: object + priority: + format: int32 + type: integer + priorityClassName: + type: string + replicas: + default: 1 + format: int32 + type: integer + 
runtimeClassName: + type: string + scale: + properties: + cooldownSeconds: + format: int32 + type: integer + disabled: + type: boolean + lookbackSeconds: + format: int32 + type: integer + max: + format: int32 + type: integer + min: + format: int32 + type: integer + replicasPerScale: + format: int32 + type: integer + scaleDownCooldownSeconds: + format: int32 + type: integer + scaleUpCooldownSeconds: + format: int32 + type: integer + targetBufferAvailability: + format: int32 + type: integer + targetProcessingSeconds: + format: int32 + type: integer + zeroReplicaSleepSeconds: + format: int32 + type: integer + type: object + securityContext: + properties: + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + serviceAccountName: + type: string + sidecars: + items: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + 
apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string 
+ value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + 
x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - 
type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + type: string + required: + - name + type: object + type: array + sink: + properties: + blackhole: + type: object + fallback: + properties: + blackhole: + type: object + kafka: + properties: + brokers: + items: + type: string + type: array + config: + type: string + sasl: + properties: + gssapi: + properties: + authType: + type: string + kerberosConfigSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + keytabSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + realm: + type: string + serviceName: + type: string + usernameSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - authType + - realm + - serviceName + - usernameSecret + type: object + mechanism: + type: string + plain: + properties: + handshake: + type: boolean + passwordSecret: + properties: + key: + 
type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + userSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - handshake + - userSecret + type: object + scramsha256: + properties: + handshake: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + userSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - handshake + - userSecret + type: object + scramsha512: + properties: + handshake: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + userSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - handshake + - userSecret + type: object + required: + - mechanism + type: object + tls: + properties: + caCertSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + certSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + insecureSkipVerify: + type: boolean + keySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + topic: + type: string + required: + - topic + type: object + log: + type: object + udsink: + properties: + container: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: 
string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: 
array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + type: object + required: + - container + type: object + type: object + kafka: + properties: + brokers: + items: + type: string + type: array + config: + type: string + sasl: + properties: + gssapi: + properties: + authType: + type: string + kerberosConfigSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + keytabSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + realm: + type: string + serviceName: + type: string + usernameSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - authType + - realm + - serviceName + - usernameSecret + type: object + mechanism: + type: string + plain: + properties: + handshake: + 
type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + userSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - handshake + - userSecret + type: object + scramsha256: + properties: + handshake: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + userSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - handshake + - userSecret + type: object + scramsha512: + properties: + handshake: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + userSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - handshake + - userSecret + type: object + required: + - mechanism + type: object + tls: + properties: + caCertSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + certSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + insecureSkipVerify: + type: boolean + keySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + topic: + type: string + required: + - topic + type: object + log: + type: object + udsink: + properties: + container: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + 
configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + 
type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + type: object + required: + - container + type: object + type: object + source: + properties: + generator: + properties: + duration: + default: 1s + type: string + jitter: + default: 0s + type: string + keyCount: + format: int32 + type: integer + msgSize: + default: 8 + format: int32 + type: integer + rpu: + default: 5 + format: int64 + type: integer + value: + format: int64 + type: integer + valueBlob: + type: string + type: object + http: + properties: + auth: + properties: + token: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + service: + type: boolean + type: object + jetstream: + properties: + auth: + properties: + basic: + properties: + password: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + user: + properties: + key: + type: string + name: + type: string + 
optional: + type: boolean + required: + - key + type: object + type: object + nkey: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + token: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + stream: + type: string + tls: + properties: + caCertSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + certSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + insecureSkipVerify: + type: boolean + keySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + url: + type: string + required: + - stream + - url + type: object + kafka: + properties: + brokers: + items: + type: string + type: array + config: + type: string + consumerGroup: + type: string + sasl: + properties: + gssapi: + properties: + authType: + type: string + kerberosConfigSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + keytabSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + realm: + type: string + serviceName: + type: string + usernameSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - authType + - realm + - serviceName + - usernameSecret + type: object + mechanism: + type: string + plain: + properties: + handshake: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + 
optional: + type: boolean + required: + - key + type: object + userSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - handshake + - userSecret + type: object + scramsha256: + properties: + handshake: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + userSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - handshake + - userSecret + type: object + scramsha512: + properties: + handshake: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + userSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - handshake + - userSecret + type: object + required: + - mechanism + type: object + tls: + properties: + caCertSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + certSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + insecureSkipVerify: + type: boolean + keySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + topic: + type: string + required: + - topic + type: object + nats: + properties: + auth: + properties: + basic: + properties: + password: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + user: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + nkey: + properties: + key: + 
type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + token: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + queue: + type: string + subject: + type: string + tls: + properties: + caCertSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + certSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + insecureSkipVerify: + type: boolean + keySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + url: + type: string + required: + - queue + - subject + - url + type: object + serving: + properties: + auth: + properties: + token: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + msgIDHeaderKey: + type: string + service: + type: boolean + store: + properties: + ttl: + type: string + url: + type: string + required: + - url + type: object + required: + - msgIDHeaderKey + - store + type: object + transformer: + properties: + builtin: + properties: + args: + items: + type: string + type: array + kwargs: + additionalProperties: + type: string + type: object + name: + enum: + - eventTimeExtractor + - filter + - timeExtractionFilter + type: string + required: + - name + type: object + container: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + 
type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + 
format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + type: object + type: object + udsource: + properties: + container: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + 
properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: 
+ type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + type: object + required: + - container + type: object + type: object + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + volumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + type: string + kind: + type: string + readOnly: + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + type: string + type: object + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + csi: + properties: + driver: + type: string + fsType: + type: string + 
nodePublishSecretRef: + properties: + name: + type: string + type: object + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + 
additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + wwids: + items: + type: string + type: array + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + iscsi: + 
properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: 
+ apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + type: string + monitors: + items: + type: string + type: array + pool: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + user: + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + sslEnabled: + type: boolean + storageMode: + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - 
secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastScaledAt: + format: date-time + type: string + lastUpdated: + format: date-time + type: string + message: + type: string + observedGeneration: + format: int64 + type: integer + phase: + enum: + - "" + - Running + - Failed + - Pausing + - Paused + - Deleting + type: string + reason: + type: string + replicas: + format: int32 + type: integer + selector: + type: string + required: + - replicas + type: object + required: + - spec + type: object + served: true + storage: true 
+ subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/base/crds/minimal/kustomization.yaml b/config/base/crds/minimal/kustomization.yaml index 89f945b5f7..6a183196eb 100644 --- a/config/base/crds/minimal/kustomization.yaml +++ b/config/base/crds/minimal/kustomization.yaml @@ -5,3 +5,4 @@ resources: - numaflow.numaproj.io_interstepbufferservices.yaml - numaflow.numaproj.io_pipelines.yaml - numaflow.numaproj.io_vertices.yaml + - numaflow.numaproj.io_monovertices.yaml diff --git a/config/base/crds/minimal/numaflow.numaproj.io_monovertices.yaml b/config/base/crds/minimal/numaflow.numaproj.io_monovertices.yaml new file mode 100644 index 0000000000..10c877b13b --- /dev/null +++ b/config/base/crds/minimal/numaflow.numaproj.io_monovertices.yaml @@ -0,0 +1,55 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: monovertices.numaflow.numaproj.io +spec: + group: numaflow.numaproj.io + names: + kind: MonoVertex + listKind: MonoVertexList + plural: monovertices + shortNames: + - mvtx + singular: monovertex + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .status.reason + name: Reason + type: string + - jsonPath: .status.message + name: Message + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - spec + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas 
+ statusReplicasPath: .status.replicas + status: {} diff --git a/config/cluster-install/rbac/controller-manager/numaflow-aggregate-to-admin.yaml b/config/cluster-install/rbac/controller-manager/numaflow-aggregate-to-admin.yaml index 8c8116abe9..8743e3c573 100644 --- a/config/cluster-install/rbac/controller-manager/numaflow-aggregate-to-admin.yaml +++ b/config/cluster-install/rbac/controller-manager/numaflow-aggregate-to-admin.yaml @@ -18,6 +18,10 @@ rules: - vertices/finalizers - vertices/status - vertices/scale + - monovertices + - monovertices/finalizers + - monovertices/status + - monovertices/scale verbs: - create - delete diff --git a/config/cluster-install/rbac/controller-manager/numaflow-aggregate-to-edit.yaml b/config/cluster-install/rbac/controller-manager/numaflow-aggregate-to-edit.yaml index 05b38d3aee..914a3760cc 100644 --- a/config/cluster-install/rbac/controller-manager/numaflow-aggregate-to-edit.yaml +++ b/config/cluster-install/rbac/controller-manager/numaflow-aggregate-to-edit.yaml @@ -18,6 +18,10 @@ rules: - vertices/finalizers - vertices/status - vertices/scale + - monovertices + - monovertices/finalizers + - monovertices/status + - monovertices/scale verbs: - create - delete diff --git a/config/cluster-install/rbac/controller-manager/numaflow-aggregate-to-view.yaml b/config/cluster-install/rbac/controller-manager/numaflow-aggregate-to-view.yaml index f0671dfe26..ca793da0da 100644 --- a/config/cluster-install/rbac/controller-manager/numaflow-aggregate-to-view.yaml +++ b/config/cluster-install/rbac/controller-manager/numaflow-aggregate-to-view.yaml @@ -18,6 +18,10 @@ rules: - vertices/finalizers - vertices/status - vertices/scale + - monovertices + - monovertices/finalizers + - monovertices/status + - monovertices/scale verbs: - get - list diff --git a/config/cluster-install/rbac/controller-manager/numaflow-cluster-role.yaml b/config/cluster-install/rbac/controller-manager/numaflow-cluster-role.yaml index f30fddbce8..b976203cb7 100644 --- 
a/config/cluster-install/rbac/controller-manager/numaflow-cluster-role.yaml +++ b/config/cluster-install/rbac/controller-manager/numaflow-cluster-role.yaml @@ -29,6 +29,10 @@ rules: - vertices/finalizers - vertices/status - vertices/scale + - monovertices + - monovertices/finalizers + - monovertices/status + - monovertices/scale - apiGroups: - coordination.k8s.io resources: diff --git a/config/cluster-install/rbac/numaflow-server/numaflow-server-cluster-role.yaml b/config/cluster-install/rbac/numaflow-server/numaflow-server-cluster-role.yaml index 5fd55f8780..06daedc06e 100644 --- a/config/cluster-install/rbac/numaflow-server/numaflow-server-cluster-role.yaml +++ b/config/cluster-install/rbac/numaflow-server/numaflow-server-cluster-role.yaml @@ -29,6 +29,10 @@ rules: - vertices/finalizers - vertices/status - vertices/scale + - monovertices + - monovertices/finalizers + - monovertices/status + - monovertices/scale - apiGroups: - "" resources: diff --git a/config/install.yaml b/config/install.yaml index 1af95e16bf..7905eb6e56 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -2613,6 +2613,5545 @@ status: --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: monovertices.numaflow.numaproj.io +spec: + group: numaflow.numaproj.io + names: + kind: MonoVertex + listKind: MonoVertexList + plural: monovertices + shortNames: + - mvtx + singular: monovertex + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .status.reason + name: Reason + type: string + - jsonPath: .status.message + name: Message + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + affinity: + properties: 
+ nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + 
operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + matchLabelKeys: + items: + type: 
string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + automountServiceAccountToken: + type: boolean + containerTemplate: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + 
type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + imagePullPolicy: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + 
type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + daemonTemplate: + properties: + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - 
nodeSelectorTerms + type: object + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: 
+ - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + 
type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + automountServiceAccountToken: + type: boolean + containerTemplate: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + imagePullPolicy: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + 
x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + dnsConfig: + properties: + nameservers: + items: + type: string + type: array + options: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + searches: + items: + type: string + type: array + type: object + dnsPolicy: + type: string + imagePullSecrets: + items: + properties: + name: + type: string + type: object + type: array + initContainerTemplate: + properties: + env: + items: + properties: + name: + type: string + value: + type: 
string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + imagePullPolicy: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: 
+ items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + nodeSelector: + additionalProperties: + type: string + type: object + priority: + format: int32 + type: integer + priorityClassName: + type: string + replicas: + format: int32 + type: integer + runtimeClassName: + type: string + securityContext: + properties: + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + properties: + gmsaCredentialSpec: + type: string 
+ gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + serviceAccountName: + type: string + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + type: object + dnsConfig: + properties: + nameservers: + items: + type: string + type: array + options: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + searches: + items: + type: string + type: array + type: object + dnsPolicy: + type: string + imagePullSecrets: + items: + properties: + name: + type: string + type: object + type: array + initContainers: + items: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + 
secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: 
+ type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + 
format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + 
type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + type: string + 
required: + - name + type: object + type: array + limits: + properties: + readBatchSize: + default: 500 + format: int64 + type: integer + readTimeout: + default: 1s + type: string + type: object + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + nodeSelector: + additionalProperties: + type: string + type: object + priority: + format: int32 + type: integer + priorityClassName: + type: string + replicas: + default: 1 + format: int32 + type: integer + runtimeClassName: + type: string + scale: + properties: + cooldownSeconds: + format: int32 + type: integer + disabled: + type: boolean + lookbackSeconds: + format: int32 + type: integer + max: + format: int32 + type: integer + min: + format: int32 + type: integer + replicasPerScale: + format: int32 + type: integer + scaleDownCooldownSeconds: + format: int32 + type: integer + scaleUpCooldownSeconds: + format: int32 + type: integer + targetBufferAvailability: + format: int32 + type: integer + targetProcessingSeconds: + format: int32 + type: integer + zeroReplicaSleepSeconds: + format: int32 + type: integer + type: object + securityContext: + properties: + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + 
gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + serviceAccountName: + type: string + sidecars: + items: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + 
scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + 
port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + 
claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + 
properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + type: string + required: + - name + type: object + type: array + sink: + properties: + blackhole: + type: object + fallback: + properties: + blackhole: + type: object + kafka: + properties: + brokers: + items: + type: string + type: array + config: + type: string + sasl: + properties: + gssapi: + properties: + authType: + type: string + kerberosConfigSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + keytabSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + passwordSecret: + 
properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + realm: + type: string + serviceName: + type: string + usernameSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - authType + - realm + - serviceName + - usernameSecret + type: object + mechanism: + type: string + plain: + properties: + handshake: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + userSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - handshake + - userSecret + type: object + scramsha256: + properties: + handshake: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + userSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - handshake + - userSecret + type: object + scramsha512: + properties: + handshake: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + userSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - handshake + - userSecret + type: object + required: + - mechanism + type: object + tls: + properties: + caCertSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + certSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + insecureSkipVerify: + type: boolean + keySecret: + properties: + key: 
+ type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + topic: + type: string + required: + - topic + type: object + log: + type: object + udsink: + properties: + container: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + type: object + required: + - container + type: object + type: object + kafka: + properties: + brokers: + items: + type: string + type: array + config: + type: string + sasl: + properties: + gssapi: + properties: + authType: + type: string + kerberosConfigSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object 
+ keytabSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + realm: + type: string + serviceName: + type: string + usernameSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - authType + - realm + - serviceName + - usernameSecret + type: object + mechanism: + type: string + plain: + properties: + handshake: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + userSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - handshake + - userSecret + type: object + scramsha256: + properties: + handshake: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + userSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - handshake + - userSecret + type: object + scramsha512: + properties: + handshake: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + userSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - handshake + - userSecret + type: object + required: + - mechanism + type: object + tls: + properties: + caCertSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + certSecret: + properties: + key: + type: string 
+ name: + type: string + optional: + type: boolean + required: + - key + type: object + insecureSkipVerify: + type: boolean + keySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + topic: + type: string + required: + - topic + type: object + log: + type: object + udsink: + properties: + container: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + 
anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + type: object + required: + - container + type: object + type: object + source: + properties: + generator: + properties: + duration: + default: 1s + type: string + jitter: + default: 0s + type: string + keyCount: + format: int32 + type: integer + msgSize: + default: 8 + format: int32 + type: integer + rpu: + default: 5 + format: 
int64 + type: integer + value: + format: int64 + type: integer + valueBlob: + type: string + type: object + http: + properties: + auth: + properties: + token: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + service: + type: boolean + type: object + jetstream: + properties: + auth: + properties: + basic: + properties: + password: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + user: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + nkey: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + token: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + stream: + type: string + tls: + properties: + caCertSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + certSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + insecureSkipVerify: + type: boolean + keySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + url: + type: string + required: + - stream + - url + type: object + kafka: + properties: + brokers: + items: + type: string + type: array + config: + type: string + consumerGroup: + type: string + sasl: + properties: + gssapi: + properties: + authType: + type: string + kerberosConfigSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + keytabSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + 
required: + - key + type: object + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + realm: + type: string + serviceName: + type: string + usernameSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - authType + - realm + - serviceName + - usernameSecret + type: object + mechanism: + type: string + plain: + properties: + handshake: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + userSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - handshake + - userSecret + type: object + scramsha256: + properties: + handshake: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + userSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - handshake + - userSecret + type: object + scramsha512: + properties: + handshake: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + userSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - handshake + - userSecret + type: object + required: + - mechanism + type: object + tls: + properties: + caCertSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + certSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + 
insecureSkipVerify: + type: boolean + keySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + topic: + type: string + required: + - topic + type: object + nats: + properties: + auth: + properties: + basic: + properties: + password: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + user: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + nkey: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + token: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + queue: + type: string + subject: + type: string + tls: + properties: + caCertSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + certSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + insecureSkipVerify: + type: boolean + keySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + url: + type: string + required: + - queue + - subject + - url + type: object + serving: + properties: + auth: + properties: + token: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + msgIDHeaderKey: + type: string + service: + type: boolean + store: + properties: + ttl: + type: string + url: + type: string + required: + - url + type: object + required: + - msgIDHeaderKey + - store + type: object + transformer: + properties: + builtin: + properties: + args: + items: + type: string + type: array + 
kwargs: + additionalProperties: + type: string + type: object + name: + enum: + - eventTimeExtractor + - filter + - timeExtractionFilter + type: string + required: + - name + type: object + container: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: 
true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + type: object + type: object + udsource: + properties: + container: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath 
+ type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + 
runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + type: object + required: + - container + type: object + type: object + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + volumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + type: string + kind: + type: string + readOnly: + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + type: string + type: object + user: + type: string + 
required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + type: string + type: object + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + type: object + spec: + properties: + accessModes: + items: + type: string + type: 
array + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + wwids: + items: + type: string + type: array + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + 
gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key 
+ - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + type: 
string + monitors: + items: + type: string + type: array + pool: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + user: + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + sslEnabled: + type: boolean + storageMode: + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: 
^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastScaledAt: + format: date-time + type: string + lastUpdated: + format: date-time + type: string + message: + type: string + observedGeneration: + format: int64 + type: integer + phase: + enum: + - "" + - Running + - Failed + - Pausing + - Paused + - Deleting + type: string + reason: + type: string + replicas: + format: int32 + type: integer + selector: + type: string + required: + - replicas + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.8.0 @@ -17954,6 +23493,10 @@ rules: - vertices/finalizers - vertices/status - vertices/scale + - monovertices + - monovertices/finalizers + - monovertices/status + - monovertices/scale verbs: - create - delete @@ -17984,6 +23527,10 @@ rules: - vertices/finalizers - vertices/status - vertices/scale + - monovertices + - monovertices/finalizers + - monovertices/status + - monovertices/scale verbs: - create - delete @@ -18014,6 +23561,10 @@ rules: - vertices/finalizers - vertices/status - vertices/scale + - monovertices + - monovertices/finalizers + - monovertices/status + - monovertices/scale verbs: - get - list @@ -18041,6 +23592,10 @@ rules: - vertices/finalizers - vertices/status - vertices/scale + - monovertices + - monovertices/finalizers + - monovertices/status + - monovertices/scale verbs: - create - delete @@ -18144,6 +23699,10 @@ rules: - vertices/finalizers - vertices/status - 
vertices/scale + - monovertices + - monovertices/finalizers + - monovertices/status + - monovertices/scale verbs: - create - delete diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index 56526100f3..5e5b823e35 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -2613,6 +2613,5545 @@ status: --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: monovertices.numaflow.numaproj.io +spec: + group: numaflow.numaproj.io + names: + kind: MonoVertex + listKind: MonoVertexList + plural: monovertices + shortNames: + - mvtx + singular: monovertex + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .status.reason + name: Reason + type: string + - jsonPath: .status.message + name: Message + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + 
items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + 
additionalProperties: + type: string + type: object + type: object + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + 
labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + automountServiceAccountToken: + type: boolean + containerTemplate: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: 
+ items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + imagePullPolicy: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + daemonTemplate: + properties: + affinity: + 
properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: 
string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + matchLabelKeys: + items: + 
type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + automountServiceAccountToken: + type: boolean + containerTemplate: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: 
+ type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + imagePullPolicy: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + 
type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + dnsConfig: + properties: + nameservers: + items: + type: string + type: array + options: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + searches: + items: + type: string + type: array + type: object + dnsPolicy: + type: string + imagePullSecrets: + items: + properties: + name: + type: string + type: object + type: array + initContainerTemplate: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: 
object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + imagePullPolicy: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + 
type: object + type: object + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + nodeSelector: + additionalProperties: + type: string + type: object + priority: + format: int32 + type: integer + priorityClassName: + type: string + replicas: + format: int32 + type: integer + runtimeClassName: + type: string + securityContext: + properties: + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + serviceAccountName: + type: string + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + type: object + dnsConfig: + properties: + nameservers: + items: + type: string + type: array + options: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + searches: + items: + type: string + type: array + type: object + dnsPolicy: + type: string + imagePullSecrets: + items: + properties: + name: + type: string + type: object + type: 
array + initContainers: + items: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: 
object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + 
format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + 
x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + 
port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + type: string + required: + - name + type: object + type: array + limits: + properties: + readBatchSize: + default: 500 + format: int64 + type: integer + readTimeout: + default: 1s + type: string + type: object + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + nodeSelector: + additionalProperties: + type: string + type: object + priority: + format: int32 + type: integer + priorityClassName: + type: string + replicas: + default: 1 + format: int32 + type: integer + runtimeClassName: + type: string + scale: + properties: + cooldownSeconds: + format: int32 + type: integer + disabled: + type: boolean + lookbackSeconds: + format: int32 + type: integer + max: + 
format: int32 + type: integer + min: + format: int32 + type: integer + replicasPerScale: + format: int32 + type: integer + scaleDownCooldownSeconds: + format: int32 + type: integer + scaleUpCooldownSeconds: + format: int32 + type: integer + targetBufferAvailability: + format: int32 + type: integer + targetProcessingSeconds: + format: int32 + type: integer + zeroReplicaSleepSeconds: + format: int32 + type: integer + type: object + securityContext: + properties: + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + serviceAccountName: + type: string + sidecars: + items: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - 
type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: 
+ type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + 
failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + properties: + 
allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + 
type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + type: string + required: + - name + type: object + type: array + sink: + properties: + blackhole: + type: object + fallback: + properties: + blackhole: + type: object + kafka: + properties: + brokers: + items: + type: string + type: array + config: + type: string + sasl: + properties: + gssapi: + properties: + authType: + type: string + kerberosConfigSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + keytabSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + realm: + type: string + serviceName: + type: string + usernameSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - authType + - realm + - serviceName + - usernameSecret + type: object + mechanism: + type: string + plain: + properties: + handshake: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + userSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + 
required: + - handshake + - userSecret + type: object + scramsha256: + properties: + handshake: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + userSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - handshake + - userSecret + type: object + scramsha512: + properties: + handshake: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + userSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - handshake + - userSecret + type: object + required: + - mechanism + type: object + tls: + properties: + caCertSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + certSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + insecureSkipVerify: + type: boolean + keySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + topic: + type: string + required: + - topic + type: object + log: + type: object + udsink: + properties: + container: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + 
containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + 
type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + type: object + required: + - container + type: object + type: object + kafka: + properties: + brokers: + items: + type: string + type: array + config: + type: string + sasl: + properties: + gssapi: + properties: + authType: + type: string + kerberosConfigSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + keytabSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + realm: + type: string + serviceName: + type: string + usernameSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - authType + - realm + - serviceName + - usernameSecret + type: object + mechanism: + type: string + plain: + properties: + handshake: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + userSecret: + properties: + key: + type: string + name: + type: string + 
optional: + type: boolean + required: + - key + type: object + required: + - handshake + - userSecret + type: object + scramsha256: + properties: + handshake: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + userSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - handshake + - userSecret + type: object + scramsha512: + properties: + handshake: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + userSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - handshake + - userSecret + type: object + required: + - mechanism + type: object + tls: + properties: + caCertSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + certSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + insecureSkipVerify: + type: boolean + keySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + topic: + type: string + required: + - topic + type: object + log: + type: object + udsink: + properties: + container: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - 
fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + 
runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + type: object + required: + - container + type: object + type: object + source: + properties: + generator: + properties: + duration: + default: 1s + type: string + jitter: + default: 0s + type: string + keyCount: + format: int32 + type: integer + msgSize: + default: 8 + format: int32 + type: integer + rpu: + default: 5 + format: int64 + type: integer + value: + format: int64 + type: integer + valueBlob: + type: string + type: object + http: + properties: + auth: + properties: + token: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + service: + type: boolean + type: object + jetstream: + properties: + auth: + properties: + basic: + properties: + password: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + user: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + nkey: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + token: + properties: + key: + 
type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + stream: + type: string + tls: + properties: + caCertSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + certSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + insecureSkipVerify: + type: boolean + keySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + url: + type: string + required: + - stream + - url + type: object + kafka: + properties: + brokers: + items: + type: string + type: array + config: + type: string + consumerGroup: + type: string + sasl: + properties: + gssapi: + properties: + authType: + type: string + kerberosConfigSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + keytabSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + realm: + type: string + serviceName: + type: string + usernameSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - authType + - realm + - serviceName + - usernameSecret + type: object + mechanism: + type: string + plain: + properties: + handshake: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + userSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - handshake + - userSecret + type: 
object + scramsha256: + properties: + handshake: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + userSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - handshake + - userSecret + type: object + scramsha512: + properties: + handshake: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + userSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - handshake + - userSecret + type: object + required: + - mechanism + type: object + tls: + properties: + caCertSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + certSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + insecureSkipVerify: + type: boolean + keySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + topic: + type: string + required: + - topic + type: object + nats: + properties: + auth: + properties: + basic: + properties: + password: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + user: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + nkey: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + token: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + 
queue: + type: string + subject: + type: string + tls: + properties: + caCertSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + certSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + insecureSkipVerify: + type: boolean + keySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + url: + type: string + required: + - queue + - subject + - url + type: object + serving: + properties: + auth: + properties: + token: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + msgIDHeaderKey: + type: string + service: + type: boolean + store: + properties: + ttl: + type: string + url: + type: string + required: + - url + type: object + required: + - msgIDHeaderKey + - store + type: object + transformer: + properties: + builtin: + properties: + args: + items: + type: string + type: array + kwargs: + additionalProperties: + type: string + type: object + name: + enum: + - eventTimeExtractor + - filter + - timeExtractionFilter + type: string + required: + - name + type: object + container: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + 
type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + type: object + type: object + udsource: + properties: + container: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: 
object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + 
type: object + required: + - container + type: object + type: object + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + volumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + type: string + kind: + type: string + readOnly: + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + type: string + type: object + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + type: string + type: object + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + 
- driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + 
x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + wwids: + items: + type: string + type: array + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + type: string + 
lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + 
containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + type: string + monitors: + items: + type: string + type: array + pool: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + user: + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + sslEnabled: + type: boolean + storageMode: + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: 
integer + path: + type: string + required: + - key + - path + type: object + type: array + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastScaledAt: + format: date-time + type: string + lastUpdated: + format: date-time + type: string + message: + type: string + observedGeneration: + format: int64 + type: integer + phase: + enum: + - "" + - Running + - Failed + - Pausing + - Paused + - Deleting + type: string + reason: + type: string + replicas: + format: int32 + type: integer + selector: + type: string + required: + - replicas + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} +status: + acceptedNames: + kind: "" + 
plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.8.0 @@ -17932,6 +23471,10 @@ rules: - vertices/finalizers - vertices/status - vertices/scale + - monovertices + - monovertices/finalizers + - monovertices/status + - monovertices/scale verbs: - create - delete @@ -18035,6 +23578,10 @@ rules: - vertices/finalizers - vertices/status - vertices/scale + - monovertices + - monovertices/finalizers + - monovertices/status + - monovertices/scale verbs: - create - delete diff --git a/config/namespace-install/rbac/controller-manager/numaflow-role.yaml b/config/namespace-install/rbac/controller-manager/numaflow-role.yaml index 85da06b646..6fc0b05696 100644 --- a/config/namespace-install/rbac/controller-manager/numaflow-role.yaml +++ b/config/namespace-install/rbac/controller-manager/numaflow-role.yaml @@ -29,6 +29,10 @@ rules: - vertices/finalizers - vertices/status - vertices/scale + - monovertices + - monovertices/finalizers + - monovertices/status + - monovertices/scale - apiGroups: - coordination.k8s.io resources: diff --git a/config/namespace-install/rbac/numaflow-server/numaflow-server-role.yaml b/config/namespace-install/rbac/numaflow-server/numaflow-server-role.yaml index 625e4b596b..fceb38afe5 100644 --- a/config/namespace-install/rbac/numaflow-server/numaflow-server-role.yaml +++ b/config/namespace-install/rbac/numaflow-server/numaflow-server-role.yaml @@ -29,6 +29,10 @@ rules: - vertices/finalizers - vertices/status - vertices/scale + - monovertices + - monovertices/finalizers + - monovertices/status + - monovertices/scale - apiGroups: - "" resources: diff --git a/docs/APIs.md b/docs/APIs.md index 8b42812244..2fcab4c372 100644 --- a/docs/APIs.md +++ b/docs/APIs.md @@ -38,6 +38,7 @@ AbstractPodTemplate DaemonTemplate, JetStreamBufferService, JobTemplate, +MonoVertexSpec, NativeRedis, SideInputsManagerTemplate, VertexTemplate) @@ 
-1480,6 +1481,7 @@ ContainerTemplate DaemonTemplate, JetStreamBufferService, JobTemplate, +MonoVertexSpec, NativeRedis, SideInputsManagerTemplate, VertexTemplate) @@ -1608,6 +1610,7 @@ DaemonTemplate

(Appears on: +MonoVertexSpec, Templates)

@@ -2849,6 +2852,192 @@ Kubernetes core/v1.ResourceRequirements +

+ +GetMonoVertexDaemonDeploymentReq +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +Image
string +
+ +
+ +PullPolicy
+ +Kubernetes core/v1.PullPolicy +
+ +
+ +Env
+ +\[\]Kubernetes core/v1.EnvVar +
+ +
+ +DefaultResources
+ +Kubernetes core/v1.ResourceRequirements +
+ +
+ +

+ +GetMonoVertexPodSpecReq +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +Image
string +
+ +
+ +PullPolicy
+ +Kubernetes core/v1.PullPolicy +
+ +
+ +Env
+ +\[\]Kubernetes core/v1.EnvVar +
+ +
+ +DefaultResources
+ +Kubernetes core/v1.ResourceRequirements +
+ +
+

GetRedisServiceSpecReq @@ -4727,10 +4916,867 @@ Numaflow defaults to 30 (Optional)

-Specifies the number of retries before marking this job failed. More -info: -https://kubernetes.io/docs/concepts/workloads/controllers/job/#pod-backoff-failure-policy -Numaflow defaults to 20 +Specifies the number of retries before marking this job failed. More +info: +https://kubernetes.io/docs/concepts/workloads/controllers/job/#pod-backoff-failure-policy +Numaflow defaults to 20 +

+ + + + + + + + + +

+ +KRB5AuthType (string alias) +

+ +

+ +

+ +(Appears on: +GSSAPI) +

+ +

+ +

+ +KRB5AuthType describes the kerberos auth type +

+ +

+ +

+ +KafkaSink +

+ +

+ +(Appears on: +AbstractSink) +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +brokers
\[\]string +
+ +
+ +topic
string +
+ +
+ +tls
+TLS +
+ +(Optional) +

+ +TLS user to configure TLS connection for kafka broker TLS.enable=true +default for TLS. +

+ +
+ +config
string +
+ +(Optional) +
+ +sasl
+ SASL +
+ +(Optional) +

+ +SASL user to configure SASL connection for kafka broker SASL.enable=true +default for SASL. +

+ +
+ +

+ +KafkaSource +

+ +

+ +(Appears on: +Source) +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +brokers
\[\]string +
+ +
+ +topic
string +
+ +
+ +consumerGroup
string +
+ +
+ +tls
+TLS +
+ +(Optional) +

+ +TLS user to configure TLS connection for kafka broker TLS.enable=true +default for TLS. +

+ +
+ +config
string +
+ +(Optional) +
+ +sasl
+ SASL +
+ +(Optional) +

+ +SASL user to configure SASL connection for kafka broker SASL.enable=true +default for SASL. +

+ +
+ +

+ +Lifecycle +

+ +

+ +(Appears on: +PipelineSpec) +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +deleteGracePeriodSeconds
int32 +
+ +(Optional) +

+ +DeleteGracePeriodSeconds used to delete pipeline gracefully +

+ +
+ +desiredPhase
+ PipelinePhase + +
+ +(Optional) +

+ +DesiredPhase used to bring the pipeline from current phase to desired +phase +

+ +
+ +pauseGracePeriodSeconds
int32 +
+ +(Optional) +

+ +PauseGracePeriodSeconds used to pause pipeline gracefully +

+ +
+ +

+ +Log +

+ +

+ +(Appears on: +AbstractSink) +

+ +

+ +

+ +

+ +LogicOperator (string alias) +

+ +

+ +

+ +(Appears on: +TagConditions) +

+ +

+ +

+ +

+ +Metadata +

+ +

+ +(Appears on: +AbstractPodTemplate) +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +annotations
map\[string\]string +
+ +
+ +labels
map\[string\]string +
+ +
+ +

+ +MonoVertex +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +metadata
+ +Kubernetes meta/v1.ObjectMeta +
+ +Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+ +spec
+ MonoVertexSpec + +
+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +replicas
int32 +
+ +(Optional) +
+ +source
+ Source +
+ +
+ +sink
+ Sink +
+ +
+ +AbstractPodTemplate
+ +AbstractPodTemplate +
+ +

+ +(Members of AbstractPodTemplate are embedded into this +type.) +

+ +(Optional) +
+ +containerTemplate
+ +ContainerTemplate +
+ +(Optional) +

+ +Container template for the main numa container. +

+ +
+ +volumes
+ +\[\]Kubernetes core/v1.Volume +
+ +(Optional) +
+ +limits
+ +MonoVertexLimits +
+ +(Optional) +

+ +Limits define the limitations such as buffer read batch size for all the +vertices of a pipeline, will override pipeline level settings +

+ +
+ +scale
+ Scale +
+ +(Optional) +

+ +Settings for autoscaling +

+ +
+ +initContainers
+ +\[\]Kubernetes core/v1.Container +
+ +(Optional) +

+ +List of customized init containers belonging to the pod. More info: +https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ +

+ +
+ +sidecars
+ +\[\]Kubernetes core/v1.Container +
+ +(Optional) +

+ +List of customized sidecar containers belonging to the pod. +

+ +
+ +daemonTemplate
+ DaemonTemplate + +
+ +(Optional) +

+ +Template for the daemon service deployment. +

+ +
+ +
+ +status
+ +MonoVertexStatus +
+ +(Optional) +
+ +

+ +MonoVertexLimits +

+ +

+ +(Appears on: +MonoVertexSpec) +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -4741,9 +5787,9 @@ Numaflow defaults to 20
+ +Field + + +Description +
+ +readBatchSize
uint64 +
+ +(Optional) +

+ +Read batch size from the source. +

+ +
+ +readTimeout
+ +Kubernetes meta/v1.Duration +
+ +(Optional) +

+ +Read timeout duration from the source.

-

+

-KRB5AuthType (string alias) +MonoVertexPhase (string alias)

@@ -4751,27 +5797,22 @@ KRB5AuthType (string alias)

(Appears on: -GSSAPI) +MonoVertexStatus)

-

- -KRB5AuthType describes the kerberos auth type -

-

-

+

-KafkaSink +MonoVertexSpec

(Appears on: -AbstractSink) +MonoVertex)

@@ -4804,11 +5845,12 @@ Description -brokers
\[\]string +replicas
int32 +(Optional) @@ -4817,7 +5859,8 @@ Description -topic
string +source
+ Source @@ -4830,19 +5873,12 @@ Description -tls
-TLS +sink
+ Sink -(Optional) -

- -TLS user to configure TLS connection for kafka broker TLS.enable=true -default for TLS. -

- @@ -4851,11 +5887,19 @@ default for TLS. -config
string +AbstractPodTemplate
+ +AbstractPodTemplate +

+ +(Members of AbstractPodTemplate are embedded into this +type.) +

+ (Optional) @@ -4865,8 +5909,9 @@ default for TLS. -sasl
- SASL +containerTemplate
+ +ContainerTemplate @@ -4874,64 +5919,25 @@ default for TLS. (Optional)

-SASL user to configure SASL connection for kafka broker SASL.enable=true -default for SASL. +Container template for the main numa container.

- - - - -

- -KafkaSource -

- -

- -(Appears on: -Source) -

- -

- -

- - - - - - - - - - - - - - - - - @@ -4940,11 +5946,20 @@ Description @@ -4953,11 +5968,18 @@ Description @@ -4966,8 +5988,9 @@ Description @@ -4987,12 +6010,19 @@ default for TLS. @@ -5001,8 +6031,9 @@ default for TLS. @@ -5022,15 +6052,15 @@ default for SASL.
- -Field - - -Description -
-brokers
\[\]string +volumes
+ +\[\]Kubernetes core/v1.Volume
+(Optional)
-topic
string +limits
+ +MonoVertexLimits
+(Optional) +

+ +Limits define the limitations such as buffer read batch size for all the +vertices of a pipeline, will override pipeline level settings +

+
-consumerGroup
string +scale
+ Scale
+(Optional) +

+ +Settings for autoscaling +

+
-tls
-TLS +initContainers
+ +\[\]Kubernetes core/v1.Container
@@ -4975,8 +5998,8 @@ TLS (Optional)

-TLS user to configure TLS connection for kafka broker TLS.enable=true -default for TLS. +List of customized init containers belonging to the pod. More info: +https://kubernetes.io/docs/concepts/workloads/pods/init-containers/

-config
string +sidecars
+ +\[\]Kubernetes core/v1.Container
(Optional) +

+ +List of customized sidecar containers belonging to the pod. +

+
-sasl
- SASL +daemonTemplate
+ DaemonTemplate +
@@ -5010,8 +6041,7 @@ default for TLS. (Optional)

-SASL user to configure SASL connection for kafka broker SASL.enable=true -default for SASL. +Template for the daemon service deployment.

-

+

-Lifecycle +MonoVertexStatus

(Appears on: -PipelineSpec) +MonoVertex)

@@ -5063,15 +6093,15 @@ Description -deleteGracePeriodSeconds
int32 +Status
+ Status -(Optional)

-DeleteGracePeriodSeconds used to delete pipeline gracefully +(Members of Status are embedded into this type.)

@@ -5082,20 +6112,13 @@ DeleteGracePeriodSeconds used to delete pipeline gracefully -desiredPhase
- PipelinePhase - +phase
+ +MonoVertexPhase -(Optional) -

- -DesiredPhase used to bring the pipeline from current phase to desired -phase -

- @@ -5104,99 +6127,76 @@ phase -pauseGracePeriodSeconds
int32 +replicas
uint32 -(Optional) -

- -PauseGracePeriodSeconds used to pause pipeline gracefully -

- - - - - -

- -Log -

- -

- -(Appears on: -AbstractSink) -

+ -

+ -

+selector
string + -

+ -LogicOperator (string alias) -

+ -

+ -

+ -(Appears on: -TagConditions) -

+ -

+reason
string + -

+ -

+ -Metadata -

+ -

+ -(Appears on: -AbstractPodTemplate) -

+ -

+message
string + -

+ - + - + - - - - - -
+ -Field - +lastUpdated
+ +Kubernetes meta/v1.Time +
+ -Description - +
-annotations
map\[string\]string +lastScaledAt
+ +Kubernetes meta/v1.Time
@@ -5209,7 +6209,7 @@ Description -labels
map\[string\]string +observedGeneration
int64
@@ -7311,7 +8311,8 @@ Scale

(Appears on: -AbstractVertex) +AbstractVertex, +MonoVertexSpec)

@@ -8139,7 +9140,8 @@ Sink

(Appears on: -AbstractVertex) +AbstractVertex, +MonoVertexSpec)

@@ -8329,7 +9331,8 @@ Source

(Appears on: -AbstractVertex) +AbstractVertex, +MonoVertexSpec)

@@ -8498,6 +9501,7 @@ Status (Appears on: InterStepBufferServiceStatus, +MonoVertexStatus, PipelineStatus, VertexStatus)

diff --git a/examples/21-simple-mono-vertex.yaml b/examples/21-simple-mono-vertex.yaml new file mode 100644 index 0000000000..b1f7dbd1fd --- /dev/null +++ b/examples/21-simple-mono-vertex.yaml @@ -0,0 +1,13 @@ +apiVersion: numaflow.numaproj.io/v1alpha1 +kind: MonoVertex +metadata: + name: simple-mono-vertex +spec: + source: + udsource: + container: + image: quay.io/numaio/numaflow-java/source-simple-source:stable + sink: + udsink: + container: + image: quay.io/numaio/numaflow-java/simple-sink:stable diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index cb121bd144..3ab861b73b 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -28,7 +28,7 @@ bash -x ${CODEGEN_PKG}/generate-groups.sh "deepcopy" \ bash -x ${CODEGEN_PKG}/generate-groups.sh "client,informer,lister" \ github.com/numaproj/numaflow/pkg/client github.com/numaproj/numaflow/pkg/apis \ "numaflow:v1alpha1" \ - --plural-exceptions="Vertex:Vertices" \ + --plural-exceptions="Vertex:Vertices,MonoVertex:MonoVertices" \ --go-header-file hack/boilerplate/boilerplate.go.txt # gofmt the tree diff --git a/pkg/apis/numaflow/v1alpha1/const.go b/pkg/apis/numaflow/v1alpha1/const.go index 862f55f6e9..19a3274cef 100644 --- a/pkg/apis/numaflow/v1alpha1/const.go +++ b/pkg/apis/numaflow/v1alpha1/const.go @@ -34,6 +34,7 @@ const ( KeyISBSvcType = "numaflow.numaproj.io/isbsvc-type" KeyPipelineName = "numaflow.numaproj.io/pipeline-name" KeyVertexName = "numaflow.numaproj.io/vertex-name" + KeyMonoVertexName = "numaflow.numaproj.io/mono-vertex-name" KeyReplica = "numaflow.numaproj.io/replica" KeySideInputName = "numaflow.numaproj.io/side-input-name" KeyPauseTimestamp = "numaflow.numaproj.io/pause-timestamp" @@ -89,25 +90,30 @@ const ( ComponentISBSvc = "isbsvc" ComponentDaemon = "daemon" ComponentVertex = "vertex" + ComponentMonoVertex = "mono-vertex" + ComponentMonoVertexDaemon = "mono-vertex-daemon" ComponentJob = "job" ComponentSideInputManager = "side-inputs-manager" ComponentUXServer = "numaflow-ux" 
// controllers - ControllerISBSvc = "isbsvc-controller" - ControllerPipeline = "pipeline-controller" - ControllerVertex = "vertex-controller" + ControllerISBSvc = "isbsvc-controller" + ControllerPipeline = "pipeline-controller" + ControllerVertex = "vertex-controller" + ControllerMonoVertex = "mono-vertex-controller" // ENV vars EnvNamespace = "NUMAFLOW_NAMESPACE" EnvPipelineName = "NUMAFLOW_PIPELINE_NAME" EnvVertexName = "NUMAFLOW_VERTEX_NAME" + EnvMonoVertexName = "NUMAFLOW_MONO_VERTEX_NAME" EnvCallbackEnabled = "NUMAFLOW_CALLBACK_ENABLED" EnvCallbackURL = "NUMAFLOW_CALLBACK_URL" EnvPod = "NUMAFLOW_POD" EnvReplica = "NUMAFLOW_REPLICA" EnvVertexObject = "NUMAFLOW_VERTEX_OBJECT" EnvPipelineObject = "NUMAFLOW_PIPELINE_OBJECT" + EnvMonoVertexObject = "NUMAFLOW_MONO_VERTEX_OBJECT" EnvSideInputObject = "NUMAFLOW_SIDE_INPUT_OBJECT" EnvImage = "NUMAFLOW_IMAGE" EnvImagePullPolicy = "NUMAFLOW_IMAGE_PULL_POLICY" @@ -150,6 +156,9 @@ const ( VertexHTTPSPort = 8443 VertexHTTPSPortName = "https" DaemonServicePort = 4327 + MonoVertexMetricsPort = 2469 + MonoVertexMetricsPortName = "metrics" + MonoVertexDaemonServicePort = 4327 DefaultRequeueAfter = 10 * time.Second @@ -159,6 +168,7 @@ const ( DefaultBufferLength = 30000 DefaultBufferUsageLimit = 0.8 DefaultReadBatchSize = 500 + DefaultReadTimeout = 1 * time.Second // Auto scaling DefaultLookbackSeconds = 120 // Default lookback seconds for calculating avg rate and pending diff --git a/pkg/apis/numaflow/v1alpha1/generated.pb.go b/pkg/apis/numaflow/v1alpha1/generated.pb.go index ec8ed47f5c..bd85ab3af0 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.pb.go +++ b/pkg/apis/numaflow/v1alpha1/generated.pb.go @@ -608,10 +608,66 @@ func (m *GetJetStreamStatefulSetSpecReq) XXX_DiscardUnknown() { var xxx_messageInfo_GetJetStreamStatefulSetSpecReq proto.InternalMessageInfo +func (m *GetMonoVertexDaemonDeploymentReq) Reset() { *m = GetMonoVertexDaemonDeploymentReq{} } +func (*GetMonoVertexDaemonDeploymentReq) ProtoMessage() {} +func 
(*GetMonoVertexDaemonDeploymentReq) Descriptor() ([]byte, []int) { + return fileDescriptor_9d0d1b17d3865563, []int{20} +} +func (m *GetMonoVertexDaemonDeploymentReq) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetMonoVertexDaemonDeploymentReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GetMonoVertexDaemonDeploymentReq) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetMonoVertexDaemonDeploymentReq.Merge(m, src) +} +func (m *GetMonoVertexDaemonDeploymentReq) XXX_Size() int { + return m.Size() +} +func (m *GetMonoVertexDaemonDeploymentReq) XXX_DiscardUnknown() { + xxx_messageInfo_GetMonoVertexDaemonDeploymentReq.DiscardUnknown(m) +} + +var xxx_messageInfo_GetMonoVertexDaemonDeploymentReq proto.InternalMessageInfo + +func (m *GetMonoVertexPodSpecReq) Reset() { *m = GetMonoVertexPodSpecReq{} } +func (*GetMonoVertexPodSpecReq) ProtoMessage() {} +func (*GetMonoVertexPodSpecReq) Descriptor() ([]byte, []int) { + return fileDescriptor_9d0d1b17d3865563, []int{21} +} +func (m *GetMonoVertexPodSpecReq) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetMonoVertexPodSpecReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GetMonoVertexPodSpecReq) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetMonoVertexPodSpecReq.Merge(m, src) +} +func (m *GetMonoVertexPodSpecReq) XXX_Size() int { + return m.Size() +} +func (m *GetMonoVertexPodSpecReq) XXX_DiscardUnknown() { + xxx_messageInfo_GetMonoVertexPodSpecReq.DiscardUnknown(m) +} + +var xxx_messageInfo_GetMonoVertexPodSpecReq proto.InternalMessageInfo + func (m *GetRedisServiceSpecReq) Reset() { *m = GetRedisServiceSpecReq{} } func (*GetRedisServiceSpecReq) ProtoMessage() {} func (*GetRedisServiceSpecReq) 
Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{20} + return fileDescriptor_9d0d1b17d3865563, []int{22} } func (m *GetRedisServiceSpecReq) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -639,7 +695,7 @@ var xxx_messageInfo_GetRedisServiceSpecReq proto.InternalMessageInfo func (m *GetRedisStatefulSetSpecReq) Reset() { *m = GetRedisStatefulSetSpecReq{} } func (*GetRedisStatefulSetSpecReq) ProtoMessage() {} func (*GetRedisStatefulSetSpecReq) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{21} + return fileDescriptor_9d0d1b17d3865563, []int{23} } func (m *GetRedisStatefulSetSpecReq) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -667,7 +723,7 @@ var xxx_messageInfo_GetRedisStatefulSetSpecReq proto.InternalMessageInfo func (m *GetSideInputDeploymentReq) Reset() { *m = GetSideInputDeploymentReq{} } func (*GetSideInputDeploymentReq) ProtoMessage() {} func (*GetSideInputDeploymentReq) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{22} + return fileDescriptor_9d0d1b17d3865563, []int{24} } func (m *GetSideInputDeploymentReq) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -695,7 +751,7 @@ var xxx_messageInfo_GetSideInputDeploymentReq proto.InternalMessageInfo func (m *GetVertexPodSpecReq) Reset() { *m = GetVertexPodSpecReq{} } func (*GetVertexPodSpecReq) ProtoMessage() {} func (*GetVertexPodSpecReq) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{23} + return fileDescriptor_9d0d1b17d3865563, []int{25} } func (m *GetVertexPodSpecReq) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -723,7 +779,7 @@ var xxx_messageInfo_GetVertexPodSpecReq proto.InternalMessageInfo func (m *GroupBy) Reset() { *m = GroupBy{} } func (*GroupBy) ProtoMessage() {} func (*GroupBy) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{24} + return fileDescriptor_9d0d1b17d3865563, []int{26} } func (m *GroupBy) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -751,7 +807,7 @@ var xxx_messageInfo_GroupBy proto.InternalMessageInfo func (m *HTTPSource) Reset() { *m = HTTPSource{} } func (*HTTPSource) ProtoMessage() {} func (*HTTPSource) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{25} + return fileDescriptor_9d0d1b17d3865563, []int{27} } func (m *HTTPSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -779,7 +835,7 @@ var xxx_messageInfo_HTTPSource proto.InternalMessageInfo func (m *IdleSource) Reset() { *m = IdleSource{} } func (*IdleSource) ProtoMessage() {} func (*IdleSource) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{26} + return fileDescriptor_9d0d1b17d3865563, []int{28} } func (m *IdleSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -807,7 +863,7 @@ var xxx_messageInfo_IdleSource proto.InternalMessageInfo func (m *InterStepBufferService) Reset() { *m = InterStepBufferService{} } func (*InterStepBufferService) ProtoMessage() {} func (*InterStepBufferService) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{27} + return fileDescriptor_9d0d1b17d3865563, []int{29} } func (m *InterStepBufferService) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -835,7 +891,7 @@ var xxx_messageInfo_InterStepBufferService proto.InternalMessageInfo func (m *InterStepBufferServiceList) Reset() { *m = InterStepBufferServiceList{} } func (*InterStepBufferServiceList) ProtoMessage() {} func (*InterStepBufferServiceList) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{28} + return fileDescriptor_9d0d1b17d3865563, []int{30} } func (m *InterStepBufferServiceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -863,7 +919,7 @@ var xxx_messageInfo_InterStepBufferServiceList proto.InternalMessageInfo func (m *InterStepBufferServiceSpec) Reset() { *m = InterStepBufferServiceSpec{} } func (*InterStepBufferServiceSpec) 
ProtoMessage() {} func (*InterStepBufferServiceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{29} + return fileDescriptor_9d0d1b17d3865563, []int{31} } func (m *InterStepBufferServiceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -891,7 +947,7 @@ var xxx_messageInfo_InterStepBufferServiceSpec proto.InternalMessageInfo func (m *InterStepBufferServiceStatus) Reset() { *m = InterStepBufferServiceStatus{} } func (*InterStepBufferServiceStatus) ProtoMessage() {} func (*InterStepBufferServiceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{30} + return fileDescriptor_9d0d1b17d3865563, []int{32} } func (m *InterStepBufferServiceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -919,7 +975,7 @@ var xxx_messageInfo_InterStepBufferServiceStatus proto.InternalMessageInfo func (m *JetStreamBufferService) Reset() { *m = JetStreamBufferService{} } func (*JetStreamBufferService) ProtoMessage() {} func (*JetStreamBufferService) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{31} + return fileDescriptor_9d0d1b17d3865563, []int{33} } func (m *JetStreamBufferService) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -947,7 +1003,7 @@ var xxx_messageInfo_JetStreamBufferService proto.InternalMessageInfo func (m *JetStreamConfig) Reset() { *m = JetStreamConfig{} } func (*JetStreamConfig) ProtoMessage() {} func (*JetStreamConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{32} + return fileDescriptor_9d0d1b17d3865563, []int{34} } func (m *JetStreamConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -975,7 +1031,7 @@ var xxx_messageInfo_JetStreamConfig proto.InternalMessageInfo func (m *JetStreamSource) Reset() { *m = JetStreamSource{} } func (*JetStreamSource) ProtoMessage() {} func (*JetStreamSource) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{33} + return 
fileDescriptor_9d0d1b17d3865563, []int{35} } func (m *JetStreamSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1003,7 +1059,7 @@ var xxx_messageInfo_JetStreamSource proto.InternalMessageInfo func (m *JobTemplate) Reset() { *m = JobTemplate{} } func (*JobTemplate) ProtoMessage() {} func (*JobTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{34} + return fileDescriptor_9d0d1b17d3865563, []int{36} } func (m *JobTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1031,7 +1087,7 @@ var xxx_messageInfo_JobTemplate proto.InternalMessageInfo func (m *KafkaSink) Reset() { *m = KafkaSink{} } func (*KafkaSink) ProtoMessage() {} func (*KafkaSink) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{35} + return fileDescriptor_9d0d1b17d3865563, []int{37} } func (m *KafkaSink) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1059,7 +1115,7 @@ var xxx_messageInfo_KafkaSink proto.InternalMessageInfo func (m *KafkaSource) Reset() { *m = KafkaSource{} } func (*KafkaSource) ProtoMessage() {} func (*KafkaSource) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{36} + return fileDescriptor_9d0d1b17d3865563, []int{38} } func (m *KafkaSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1087,7 +1143,7 @@ var xxx_messageInfo_KafkaSource proto.InternalMessageInfo func (m *Lifecycle) Reset() { *m = Lifecycle{} } func (*Lifecycle) ProtoMessage() {} func (*Lifecycle) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{37} + return fileDescriptor_9d0d1b17d3865563, []int{39} } func (m *Lifecycle) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1115,7 +1171,7 @@ var xxx_messageInfo_Lifecycle proto.InternalMessageInfo func (m *Log) Reset() { *m = Log{} } func (*Log) ProtoMessage() {} func (*Log) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{38} + return 
fileDescriptor_9d0d1b17d3865563, []int{40} } func (m *Log) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1143,7 +1199,7 @@ var xxx_messageInfo_Log proto.InternalMessageInfo func (m *Metadata) Reset() { *m = Metadata{} } func (*Metadata) ProtoMessage() {} func (*Metadata) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{39} + return fileDescriptor_9d0d1b17d3865563, []int{41} } func (m *Metadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1168,10 +1224,150 @@ func (m *Metadata) XXX_DiscardUnknown() { var xxx_messageInfo_Metadata proto.InternalMessageInfo +func (m *MonoVertex) Reset() { *m = MonoVertex{} } +func (*MonoVertex) ProtoMessage() {} +func (*MonoVertex) Descriptor() ([]byte, []int) { + return fileDescriptor_9d0d1b17d3865563, []int{42} +} +func (m *MonoVertex) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MonoVertex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MonoVertex) XXX_Merge(src proto.Message) { + xxx_messageInfo_MonoVertex.Merge(m, src) +} +func (m *MonoVertex) XXX_Size() int { + return m.Size() +} +func (m *MonoVertex) XXX_DiscardUnknown() { + xxx_messageInfo_MonoVertex.DiscardUnknown(m) +} + +var xxx_messageInfo_MonoVertex proto.InternalMessageInfo + +func (m *MonoVertexLimits) Reset() { *m = MonoVertexLimits{} } +func (*MonoVertexLimits) ProtoMessage() {} +func (*MonoVertexLimits) Descriptor() ([]byte, []int) { + return fileDescriptor_9d0d1b17d3865563, []int{43} +} +func (m *MonoVertexLimits) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MonoVertexLimits) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MonoVertexLimits) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_MonoVertexLimits.Merge(m, src) +} +func (m *MonoVertexLimits) XXX_Size() int { + return m.Size() +} +func (m *MonoVertexLimits) XXX_DiscardUnknown() { + xxx_messageInfo_MonoVertexLimits.DiscardUnknown(m) +} + +var xxx_messageInfo_MonoVertexLimits proto.InternalMessageInfo + +func (m *MonoVertexList) Reset() { *m = MonoVertexList{} } +func (*MonoVertexList) ProtoMessage() {} +func (*MonoVertexList) Descriptor() ([]byte, []int) { + return fileDescriptor_9d0d1b17d3865563, []int{44} +} +func (m *MonoVertexList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MonoVertexList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MonoVertexList) XXX_Merge(src proto.Message) { + xxx_messageInfo_MonoVertexList.Merge(m, src) +} +func (m *MonoVertexList) XXX_Size() int { + return m.Size() +} +func (m *MonoVertexList) XXX_DiscardUnknown() { + xxx_messageInfo_MonoVertexList.DiscardUnknown(m) +} + +var xxx_messageInfo_MonoVertexList proto.InternalMessageInfo + +func (m *MonoVertexSpec) Reset() { *m = MonoVertexSpec{} } +func (*MonoVertexSpec) ProtoMessage() {} +func (*MonoVertexSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_9d0d1b17d3865563, []int{45} +} +func (m *MonoVertexSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MonoVertexSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MonoVertexSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_MonoVertexSpec.Merge(m, src) +} +func (m *MonoVertexSpec) XXX_Size() int { + return m.Size() +} +func (m *MonoVertexSpec) XXX_DiscardUnknown() { + xxx_messageInfo_MonoVertexSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_MonoVertexSpec proto.InternalMessageInfo + +func (m 
*MonoVertexStatus) Reset() { *m = MonoVertexStatus{} } +func (*MonoVertexStatus) ProtoMessage() {} +func (*MonoVertexStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_9d0d1b17d3865563, []int{46} +} +func (m *MonoVertexStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MonoVertexStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MonoVertexStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_MonoVertexStatus.Merge(m, src) +} +func (m *MonoVertexStatus) XXX_Size() int { + return m.Size() +} +func (m *MonoVertexStatus) XXX_DiscardUnknown() { + xxx_messageInfo_MonoVertexStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_MonoVertexStatus proto.InternalMessageInfo + func (m *NativeRedis) Reset() { *m = NativeRedis{} } func (*NativeRedis) ProtoMessage() {} func (*NativeRedis) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{40} + return fileDescriptor_9d0d1b17d3865563, []int{47} } func (m *NativeRedis) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1199,7 +1395,7 @@ var xxx_messageInfo_NativeRedis proto.InternalMessageInfo func (m *NatsAuth) Reset() { *m = NatsAuth{} } func (*NatsAuth) ProtoMessage() {} func (*NatsAuth) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{41} + return fileDescriptor_9d0d1b17d3865563, []int{48} } func (m *NatsAuth) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1227,7 +1423,7 @@ var xxx_messageInfo_NatsAuth proto.InternalMessageInfo func (m *NatsSource) Reset() { *m = NatsSource{} } func (*NatsSource) ProtoMessage() {} func (*NatsSource) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{42} + return fileDescriptor_9d0d1b17d3865563, []int{49} } func (m *NatsSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1255,7 +1451,7 @@ var 
xxx_messageInfo_NatsSource proto.InternalMessageInfo func (m *NoStore) Reset() { *m = NoStore{} } func (*NoStore) ProtoMessage() {} func (*NoStore) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{43} + return fileDescriptor_9d0d1b17d3865563, []int{50} } func (m *NoStore) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1283,7 +1479,7 @@ var xxx_messageInfo_NoStore proto.InternalMessageInfo func (m *PBQStorage) Reset() { *m = PBQStorage{} } func (*PBQStorage) ProtoMessage() {} func (*PBQStorage) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{44} + return fileDescriptor_9d0d1b17d3865563, []int{51} } func (m *PBQStorage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1311,7 +1507,7 @@ var xxx_messageInfo_PBQStorage proto.InternalMessageInfo func (m *PersistenceStrategy) Reset() { *m = PersistenceStrategy{} } func (*PersistenceStrategy) ProtoMessage() {} func (*PersistenceStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{45} + return fileDescriptor_9d0d1b17d3865563, []int{52} } func (m *PersistenceStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1339,7 +1535,7 @@ var xxx_messageInfo_PersistenceStrategy proto.InternalMessageInfo func (m *Pipeline) Reset() { *m = Pipeline{} } func (*Pipeline) ProtoMessage() {} func (*Pipeline) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{46} + return fileDescriptor_9d0d1b17d3865563, []int{53} } func (m *Pipeline) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1367,7 +1563,7 @@ var xxx_messageInfo_Pipeline proto.InternalMessageInfo func (m *PipelineLimits) Reset() { *m = PipelineLimits{} } func (*PipelineLimits) ProtoMessage() {} func (*PipelineLimits) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{47} + return fileDescriptor_9d0d1b17d3865563, []int{54} } func (m *PipelineLimits) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -1395,7 +1591,7 @@ var xxx_messageInfo_PipelineLimits proto.InternalMessageInfo func (m *PipelineList) Reset() { *m = PipelineList{} } func (*PipelineList) ProtoMessage() {} func (*PipelineList) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{48} + return fileDescriptor_9d0d1b17d3865563, []int{55} } func (m *PipelineList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1423,7 +1619,7 @@ var xxx_messageInfo_PipelineList proto.InternalMessageInfo func (m *PipelineSpec) Reset() { *m = PipelineSpec{} } func (*PipelineSpec) ProtoMessage() {} func (*PipelineSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{49} + return fileDescriptor_9d0d1b17d3865563, []int{56} } func (m *PipelineSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1451,7 +1647,7 @@ var xxx_messageInfo_PipelineSpec proto.InternalMessageInfo func (m *PipelineStatus) Reset() { *m = PipelineStatus{} } func (*PipelineStatus) ProtoMessage() {} func (*PipelineStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{50} + return fileDescriptor_9d0d1b17d3865563, []int{57} } func (m *PipelineStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1479,7 +1675,7 @@ var xxx_messageInfo_PipelineStatus proto.InternalMessageInfo func (m *RedisBufferService) Reset() { *m = RedisBufferService{} } func (*RedisBufferService) ProtoMessage() {} func (*RedisBufferService) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{51} + return fileDescriptor_9d0d1b17d3865563, []int{58} } func (m *RedisBufferService) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1507,7 +1703,7 @@ var xxx_messageInfo_RedisBufferService proto.InternalMessageInfo func (m *RedisConfig) Reset() { *m = RedisConfig{} } func (*RedisConfig) ProtoMessage() {} func (*RedisConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{52} + return 
fileDescriptor_9d0d1b17d3865563, []int{59} } func (m *RedisConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1535,7 +1731,7 @@ var xxx_messageInfo_RedisConfig proto.InternalMessageInfo func (m *RedisSettings) Reset() { *m = RedisSettings{} } func (*RedisSettings) ProtoMessage() {} func (*RedisSettings) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{53} + return fileDescriptor_9d0d1b17d3865563, []int{60} } func (m *RedisSettings) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1563,7 +1759,7 @@ var xxx_messageInfo_RedisSettings proto.InternalMessageInfo func (m *SASL) Reset() { *m = SASL{} } func (*SASL) ProtoMessage() {} func (*SASL) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{54} + return fileDescriptor_9d0d1b17d3865563, []int{61} } func (m *SASL) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1591,7 +1787,7 @@ var xxx_messageInfo_SASL proto.InternalMessageInfo func (m *SASLPlain) Reset() { *m = SASLPlain{} } func (*SASLPlain) ProtoMessage() {} func (*SASLPlain) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{55} + return fileDescriptor_9d0d1b17d3865563, []int{62} } func (m *SASLPlain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1619,7 +1815,7 @@ var xxx_messageInfo_SASLPlain proto.InternalMessageInfo func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{56} + return fileDescriptor_9d0d1b17d3865563, []int{63} } func (m *Scale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1647,7 +1843,7 @@ var xxx_messageInfo_Scale proto.InternalMessageInfo func (m *ServingSource) Reset() { *m = ServingSource{} } func (*ServingSource) ProtoMessage() {} func (*ServingSource) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{57} + return fileDescriptor_9d0d1b17d3865563, []int{64} } func 
(m *ServingSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1675,7 +1871,7 @@ var xxx_messageInfo_ServingSource proto.InternalMessageInfo func (m *ServingStore) Reset() { *m = ServingStore{} } func (*ServingStore) ProtoMessage() {} func (*ServingStore) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{58} + return fileDescriptor_9d0d1b17d3865563, []int{65} } func (m *ServingStore) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1703,7 +1899,7 @@ var xxx_messageInfo_ServingStore proto.InternalMessageInfo func (m *SessionWindow) Reset() { *m = SessionWindow{} } func (*SessionWindow) ProtoMessage() {} func (*SessionWindow) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{59} + return fileDescriptor_9d0d1b17d3865563, []int{66} } func (m *SessionWindow) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1731,7 +1927,7 @@ var xxx_messageInfo_SessionWindow proto.InternalMessageInfo func (m *SideInput) Reset() { *m = SideInput{} } func (*SideInput) ProtoMessage() {} func (*SideInput) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{60} + return fileDescriptor_9d0d1b17d3865563, []int{67} } func (m *SideInput) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1759,7 +1955,7 @@ var xxx_messageInfo_SideInput proto.InternalMessageInfo func (m *SideInputTrigger) Reset() { *m = SideInputTrigger{} } func (*SideInputTrigger) ProtoMessage() {} func (*SideInputTrigger) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{61} + return fileDescriptor_9d0d1b17d3865563, []int{68} } func (m *SideInputTrigger) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1787,7 +1983,7 @@ var xxx_messageInfo_SideInputTrigger proto.InternalMessageInfo func (m *SideInputsManagerTemplate) Reset() { *m = SideInputsManagerTemplate{} } func (*SideInputsManagerTemplate) ProtoMessage() {} func (*SideInputsManagerTemplate) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_9d0d1b17d3865563, []int{62} + return fileDescriptor_9d0d1b17d3865563, []int{69} } func (m *SideInputsManagerTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1815,7 +2011,7 @@ var xxx_messageInfo_SideInputsManagerTemplate proto.InternalMessageInfo func (m *Sink) Reset() { *m = Sink{} } func (*Sink) ProtoMessage() {} func (*Sink) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{63} + return fileDescriptor_9d0d1b17d3865563, []int{70} } func (m *Sink) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1843,7 +2039,7 @@ var xxx_messageInfo_Sink proto.InternalMessageInfo func (m *SlidingWindow) Reset() { *m = SlidingWindow{} } func (*SlidingWindow) ProtoMessage() {} func (*SlidingWindow) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{64} + return fileDescriptor_9d0d1b17d3865563, []int{71} } func (m *SlidingWindow) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1871,7 +2067,7 @@ var xxx_messageInfo_SlidingWindow proto.InternalMessageInfo func (m *Source) Reset() { *m = Source{} } func (*Source) ProtoMessage() {} func (*Source) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{65} + return fileDescriptor_9d0d1b17d3865563, []int{72} } func (m *Source) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1899,7 +2095,7 @@ var xxx_messageInfo_Source proto.InternalMessageInfo func (m *Status) Reset() { *m = Status{} } func (*Status) ProtoMessage() {} func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{66} + return fileDescriptor_9d0d1b17d3865563, []int{73} } func (m *Status) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1927,7 +2123,7 @@ var xxx_messageInfo_Status proto.InternalMessageInfo func (m *TLS) Reset() { *m = TLS{} } func (*TLS) ProtoMessage() {} func (*TLS) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{67} + return 
fileDescriptor_9d0d1b17d3865563, []int{74} } func (m *TLS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1955,7 +2151,7 @@ var xxx_messageInfo_TLS proto.InternalMessageInfo func (m *TagConditions) Reset() { *m = TagConditions{} } func (*TagConditions) ProtoMessage() {} func (*TagConditions) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{68} + return fileDescriptor_9d0d1b17d3865563, []int{75} } func (m *TagConditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1983,7 +2179,7 @@ var xxx_messageInfo_TagConditions proto.InternalMessageInfo func (m *Templates) Reset() { *m = Templates{} } func (*Templates) ProtoMessage() {} func (*Templates) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{69} + return fileDescriptor_9d0d1b17d3865563, []int{76} } func (m *Templates) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2011,7 +2207,7 @@ var xxx_messageInfo_Templates proto.InternalMessageInfo func (m *Transformer) Reset() { *m = Transformer{} } func (*Transformer) ProtoMessage() {} func (*Transformer) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{70} + return fileDescriptor_9d0d1b17d3865563, []int{77} } func (m *Transformer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2039,7 +2235,7 @@ var xxx_messageInfo_Transformer proto.InternalMessageInfo func (m *UDF) Reset() { *m = UDF{} } func (*UDF) ProtoMessage() {} func (*UDF) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{71} + return fileDescriptor_9d0d1b17d3865563, []int{78} } func (m *UDF) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2067,7 +2263,7 @@ var xxx_messageInfo_UDF proto.InternalMessageInfo func (m *UDSink) Reset() { *m = UDSink{} } func (*UDSink) ProtoMessage() {} func (*UDSink) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{72} + return fileDescriptor_9d0d1b17d3865563, []int{79} } func (m *UDSink) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2095,7 +2291,7 @@ var xxx_messageInfo_UDSink proto.InternalMessageInfo func (m *UDSource) Reset() { *m = UDSource{} } func (*UDSource) ProtoMessage() {} func (*UDSource) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{73} + return fileDescriptor_9d0d1b17d3865563, []int{80} } func (m *UDSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2123,7 +2319,7 @@ var xxx_messageInfo_UDSource proto.InternalMessageInfo func (m *UDTransformer) Reset() { *m = UDTransformer{} } func (*UDTransformer) ProtoMessage() {} func (*UDTransformer) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{74} + return fileDescriptor_9d0d1b17d3865563, []int{81} } func (m *UDTransformer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2151,7 +2347,7 @@ var xxx_messageInfo_UDTransformer proto.InternalMessageInfo func (m *Vertex) Reset() { *m = Vertex{} } func (*Vertex) ProtoMessage() {} func (*Vertex) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{75} + return fileDescriptor_9d0d1b17d3865563, []int{82} } func (m *Vertex) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2179,7 +2375,7 @@ var xxx_messageInfo_Vertex proto.InternalMessageInfo func (m *VertexInstance) Reset() { *m = VertexInstance{} } func (*VertexInstance) ProtoMessage() {} func (*VertexInstance) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{76} + return fileDescriptor_9d0d1b17d3865563, []int{83} } func (m *VertexInstance) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2207,7 +2403,7 @@ var xxx_messageInfo_VertexInstance proto.InternalMessageInfo func (m *VertexLimits) Reset() { *m = VertexLimits{} } func (*VertexLimits) ProtoMessage() {} func (*VertexLimits) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{77} + return fileDescriptor_9d0d1b17d3865563, []int{84} } func (m 
*VertexLimits) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2235,7 +2431,7 @@ var xxx_messageInfo_VertexLimits proto.InternalMessageInfo func (m *VertexList) Reset() { *m = VertexList{} } func (*VertexList) ProtoMessage() {} func (*VertexList) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{78} + return fileDescriptor_9d0d1b17d3865563, []int{85} } func (m *VertexList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2263,7 +2459,7 @@ var xxx_messageInfo_VertexList proto.InternalMessageInfo func (m *VertexSpec) Reset() { *m = VertexSpec{} } func (*VertexSpec) ProtoMessage() {} func (*VertexSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{79} + return fileDescriptor_9d0d1b17d3865563, []int{86} } func (m *VertexSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2291,7 +2487,7 @@ var xxx_messageInfo_VertexSpec proto.InternalMessageInfo func (m *VertexStatus) Reset() { *m = VertexStatus{} } func (*VertexStatus) ProtoMessage() {} func (*VertexStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{80} + return fileDescriptor_9d0d1b17d3865563, []int{87} } func (m *VertexStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2319,7 +2515,7 @@ var xxx_messageInfo_VertexStatus proto.InternalMessageInfo func (m *VertexTemplate) Reset() { *m = VertexTemplate{} } func (*VertexTemplate) ProtoMessage() {} func (*VertexTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{81} + return fileDescriptor_9d0d1b17d3865563, []int{88} } func (m *VertexTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2347,7 +2543,7 @@ var xxx_messageInfo_VertexTemplate proto.InternalMessageInfo func (m *Watermark) Reset() { *m = Watermark{} } func (*Watermark) ProtoMessage() {} func (*Watermark) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{82} + return 
fileDescriptor_9d0d1b17d3865563, []int{89} } func (m *Watermark) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2375,7 +2571,7 @@ var xxx_messageInfo_Watermark proto.InternalMessageInfo func (m *Window) Reset() { *m = Window{} } func (*Window) ProtoMessage() {} func (*Window) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{83} + return fileDescriptor_9d0d1b17d3865563, []int{90} } func (m *Window) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2425,6 +2621,8 @@ func init() { proto.RegisterMapType((map[string]string)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.GetJetStreamServiceSpecReq.LabelsEntry") proto.RegisterType((*GetJetStreamStatefulSetSpecReq)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.GetJetStreamStatefulSetSpecReq") proto.RegisterMapType((map[string]string)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.GetJetStreamStatefulSetSpecReq.LabelsEntry") + proto.RegisterType((*GetMonoVertexDaemonDeploymentReq)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.GetMonoVertexDaemonDeploymentReq") + proto.RegisterType((*GetMonoVertexPodSpecReq)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.GetMonoVertexPodSpecReq") proto.RegisterType((*GetRedisServiceSpecReq)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.GetRedisServiceSpecReq") proto.RegisterMapType((map[string]string)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.GetRedisServiceSpecReq.LabelsEntry") proto.RegisterType((*GetRedisStatefulSetSpecReq)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.GetRedisStatefulSetSpecReq") @@ -2449,6 +2647,11 @@ func init() { proto.RegisterType((*Metadata)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.Metadata") proto.RegisterMapType((map[string]string)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.Metadata.AnnotationsEntry") 
proto.RegisterMapType((map[string]string)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.Metadata.LabelsEntry") + proto.RegisterType((*MonoVertex)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.MonoVertex") + proto.RegisterType((*MonoVertexLimits)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.MonoVertexLimits") + proto.RegisterType((*MonoVertexList)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.MonoVertexList") + proto.RegisterType((*MonoVertexSpec)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.MonoVertexSpec") + proto.RegisterType((*MonoVertexStatus)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.MonoVertexStatus") proto.RegisterType((*NativeRedis)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.NativeRedis") proto.RegisterType((*NatsAuth)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.NatsAuth") proto.RegisterType((*NatsSource)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.NatsSource") @@ -2501,458 +2704,471 @@ func init() { } var fileDescriptor_9d0d1b17d3865563 = []byte{ - // 7203 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5b, 0x6c, 0x24, 0xc7, - 0x75, 0xa8, 0xe6, 0x45, 0xce, 0x9c, 0x21, 0xb9, 0xbb, 0xb5, 0xd2, 0x8a, 0x4b, 0xad, 0x76, 0xd6, - 0xed, 0x2b, 0xdd, 0xf5, 0xb5, 0x4d, 0x5e, 0xf1, 0x4a, 0x96, 0xec, 0x6b, 0x5b, 0xe2, 0x90, 0x4b, - 0x2e, 0xb5, 0xe4, 0x2e, 0x7d, 0x86, 0x5c, 0xc9, 0xd6, 0xb5, 0x75, 0x9b, 0xdd, 0xc5, 0x61, 0x6b, - 0x7a, 0xba, 0xc7, 0xdd, 0x3d, 0xdc, 0xa5, 0x1c, 0xc3, 0xaf, 0x0f, 0x29, 0x48, 0x82, 0x04, 0xfe, - 0x32, 0x10, 0x38, 0x41, 0x82, 0x00, 0xfe, 0x30, 0xfc, 0x13, 0xc0, 0xf9, 0x08, 0x10, 0x24, 0x01, - 0x82, 0xc0, 0x09, 0xf2, 0xf0, 0x47, 0x80, 0x38, 0x08, 0x40, 0xc4, 0x0c, 0xf2, 0x91, 0x04, 0x31, - 0x8c, 0x18, 0x48, 0x9c, 0x85, 0x01, 0x07, 0xf5, 0xea, 0xd7, 0xf4, 0xec, 0x92, 0xd3, 0xa4, 0xbc, - 0x4e, 0xfc, 0x37, 0x5d, 
0x75, 0xea, 0x9c, 0xea, 0xd3, 0x55, 0x75, 0x9e, 0x75, 0x06, 0x56, 0xda, - 0x56, 0xb0, 0xdb, 0xdf, 0x9e, 0x35, 0xdc, 0xee, 0x9c, 0xd3, 0xef, 0xea, 0x3d, 0xcf, 0x7d, 0x83, - 0xff, 0xd8, 0xb1, 0xdd, 0x3b, 0x73, 0xbd, 0x4e, 0x7b, 0x4e, 0xef, 0x59, 0x7e, 0xd4, 0xb2, 0xf7, - 0x8c, 0x6e, 0xf7, 0x76, 0xf5, 0x67, 0xe6, 0xda, 0xd4, 0xa1, 0x9e, 0x1e, 0x50, 0x73, 0xb6, 0xe7, - 0xb9, 0x81, 0x4b, 0x9e, 0x8f, 0x10, 0xcd, 0x2a, 0x44, 0xb3, 0x6a, 0xd8, 0x6c, 0xaf, 0xd3, 0x9e, - 0x65, 0x88, 0xa2, 0x16, 0x85, 0x68, 0xe6, 0xfd, 0xb1, 0x19, 0xb4, 0xdd, 0xb6, 0x3b, 0xc7, 0xf1, - 0x6d, 0xf7, 0x77, 0xf8, 0x13, 0x7f, 0xe0, 0xbf, 0x04, 0x9d, 0x19, 0xad, 0xf3, 0x82, 0x3f, 0x6b, - 0xb9, 0x6c, 0x5a, 0x73, 0x86, 0xeb, 0xd1, 0xb9, 0xbd, 0x81, 0xb9, 0xcc, 0x3c, 0x1b, 0xc1, 0x74, - 0x75, 0x63, 0xd7, 0x72, 0xa8, 0xb7, 0xaf, 0xde, 0x65, 0xce, 0xa3, 0xbe, 0xdb, 0xf7, 0x0c, 0x7a, - 0xac, 0x51, 0xfe, 0x5c, 0x97, 0x06, 0x7a, 0x16, 0xad, 0xb9, 0x61, 0xa3, 0xbc, 0xbe, 0x13, 0x58, - 0xdd, 0x41, 0x32, 0x1f, 0x78, 0xd0, 0x00, 0xdf, 0xd8, 0xa5, 0x5d, 0x3d, 0x3d, 0x4e, 0xfb, 0xdb, - 0x1a, 0x9c, 0x5f, 0xd8, 0xf6, 0x03, 0x4f, 0x37, 0x82, 0x0d, 0xd7, 0xdc, 0xa4, 0xdd, 0x9e, 0xad, - 0x07, 0x94, 0x74, 0xa0, 0xca, 0xe6, 0x66, 0xea, 0x81, 0x3e, 0x5d, 0xb8, 0x52, 0xb8, 0x5a, 0x9f, - 0x5f, 0x98, 0x1d, 0xf1, 0x5b, 0xcc, 0xae, 0x4b, 0x44, 0xcd, 0x89, 0xc3, 0x83, 0x46, 0x55, 0x3d, - 0x61, 0x48, 0x80, 0x7c, 0xa5, 0x00, 0x13, 0x8e, 0x6b, 0xd2, 0x16, 0xb5, 0xa9, 0x11, 0xb8, 0xde, - 0x74, 0xf1, 0x4a, 0xe9, 0x6a, 0x7d, 0xfe, 0x53, 0x23, 0x53, 0xcc, 0x78, 0xa3, 0xd9, 0x9b, 0x31, - 0x02, 0xd7, 0x9c, 0xc0, 0xdb, 0x6f, 0x3e, 0xfa, 0xad, 0x83, 0xc6, 0x23, 0x87, 0x07, 0x8d, 0x89, - 0x78, 0x17, 0x26, 0x66, 0x42, 0xb6, 0xa0, 0x1e, 0xb8, 0x36, 0x63, 0x99, 0xe5, 0x3a, 0xfe, 0x74, - 0x89, 0x4f, 0xec, 0xf2, 0xac, 0xe0, 0x36, 0x23, 0x3f, 0xcb, 0x96, 0xcb, 0xec, 0xde, 0x33, 0xb3, - 0x9b, 0x21, 0x58, 0xf3, 0xbc, 0x44, 0x5c, 0x8f, 0xda, 0x7c, 0x8c, 0xe3, 0x21, 0x14, 0xce, 0xf8, - 0xd4, 0xe8, 0x7b, 0x56, 0xb0, 0xbf, 0xe8, 0x3a, 0x01, 0xbd, 
0x1b, 0x4c, 0x97, 0x39, 0x97, 0x9f, - 0xce, 0x42, 0xbd, 0xe1, 0x9a, 0xad, 0x24, 0x74, 0xf3, 0xfc, 0xe1, 0x41, 0xe3, 0x4c, 0xaa, 0x11, - 0xd3, 0x38, 0x89, 0x03, 0x67, 0xad, 0xae, 0xde, 0xa6, 0x1b, 0x7d, 0xdb, 0x6e, 0x51, 0xc3, 0xa3, - 0x81, 0x3f, 0x5d, 0xe1, 0xaf, 0x70, 0x35, 0x8b, 0xce, 0x9a, 0x6b, 0xe8, 0xf6, 0xad, 0xed, 0x37, - 0xa8, 0x11, 0x20, 0xdd, 0xa1, 0x1e, 0x75, 0x0c, 0xda, 0x9c, 0x96, 0x2f, 0x73, 0x76, 0x35, 0x85, - 0x09, 0x07, 0x70, 0x93, 0x15, 0x38, 0xd7, 0xf3, 0x2c, 0x97, 0x4f, 0xc1, 0xd6, 0x7d, 0xff, 0xa6, - 0xde, 0xa5, 0xd3, 0x63, 0x57, 0x0a, 0x57, 0x6b, 0xcd, 0x8b, 0x12, 0xcd, 0xb9, 0x8d, 0x34, 0x00, - 0x0e, 0x8e, 0x21, 0x57, 0xa1, 0xaa, 0x1a, 0xa7, 0xc7, 0xaf, 0x14, 0xae, 0x56, 0xc4, 0xda, 0x51, - 0x63, 0x31, 0xec, 0x25, 0xcb, 0x50, 0xd5, 0x77, 0x76, 0x2c, 0x87, 0x41, 0x56, 0x39, 0x0b, 0x2f, - 0x65, 0xbd, 0xda, 0x82, 0x84, 0x11, 0x78, 0xd4, 0x13, 0x86, 0x63, 0xc9, 0xcb, 0x40, 0x7c, 0xea, - 0xed, 0x59, 0x06, 0x5d, 0x30, 0x0c, 0xb7, 0xef, 0x04, 0x7c, 0xee, 0x35, 0x3e, 0xf7, 0x19, 0x39, - 0x77, 0xd2, 0x1a, 0x80, 0xc0, 0x8c, 0x51, 0xe4, 0x25, 0x38, 0x2b, 0xb7, 0x5d, 0xc4, 0x05, 0xe0, - 0x98, 0x1e, 0x65, 0x8c, 0xc4, 0x54, 0x1f, 0x0e, 0x40, 0x13, 0x13, 0x2e, 0xe9, 0xfd, 0xc0, 0xed, - 0x32, 0x94, 0x49, 0xa2, 0x9b, 0x6e, 0x87, 0x3a, 0xd3, 0xf5, 0x2b, 0x85, 0xab, 0xd5, 0xe6, 0x95, - 0xc3, 0x83, 0xc6, 0xa5, 0x85, 0xfb, 0xc0, 0xe1, 0x7d, 0xb1, 0x90, 0x5b, 0x50, 0x33, 0x1d, 0x7f, - 0xc3, 0xb5, 0x2d, 0x63, 0x7f, 0x7a, 0x82, 0x4f, 0xf0, 0x19, 0xf9, 0xaa, 0xb5, 0xa5, 0x9b, 0x2d, - 0xd1, 0x71, 0xef, 0xa0, 0x71, 0x69, 0xf0, 0x74, 0x9c, 0x0d, 0xfb, 0x31, 0xc2, 0x41, 0xd6, 0x39, - 0xc2, 0x45, 0xd7, 0xd9, 0xb1, 0xda, 0xd3, 0x93, 0xfc, 0x6b, 0x5c, 0x19, 0xb2, 0xa0, 0x97, 0x6e, - 0xb6, 0x04, 0x5c, 0x73, 0x52, 0x92, 0x13, 0x8f, 0x18, 0x61, 0x98, 0x79, 0x11, 0xce, 0x0d, 0xec, - 0x5a, 0x72, 0x16, 0x4a, 0x1d, 0xba, 0xcf, 0x0f, 0xa5, 0x1a, 0xb2, 0x9f, 0xe4, 0x51, 0xa8, 0xec, - 0xe9, 0x76, 0x9f, 0x4e, 0x17, 0x79, 0x9b, 0x78, 0xf8, 0x50, 0xf1, 0x85, 0x82, 0xf6, 0x9b, 0x25, - 
0x98, 0x50, 0x67, 0x41, 0xcb, 0x72, 0x3a, 0xe4, 0x15, 0x28, 0xd9, 0x6e, 0x5b, 0x9e, 0x68, 0x1f, - 0x1e, 0xf9, 0x7c, 0x59, 0x73, 0xdb, 0xcd, 0xf1, 0xc3, 0x83, 0x46, 0x69, 0xcd, 0x6d, 0x23, 0xc3, - 0x48, 0x0c, 0xa8, 0x74, 0xf4, 0x9d, 0x8e, 0xce, 0xe7, 0x50, 0x9f, 0x6f, 0x8e, 0x8c, 0xfa, 0x06, - 0xc3, 0xc2, 0xe6, 0xda, 0xac, 0x1d, 0x1e, 0x34, 0x2a, 0xfc, 0x11, 0x05, 0x6e, 0xe2, 0x42, 0x6d, - 0xdb, 0xd6, 0x8d, 0xce, 0xae, 0x6b, 0xd3, 0xe9, 0x52, 0x4e, 0x42, 0x4d, 0x85, 0x49, 0x7c, 0x80, - 0xf0, 0x11, 0x23, 0x1a, 0xc4, 0x80, 0xb1, 0xbe, 0xe9, 0x5b, 0x4e, 0x47, 0x9e, 0x4e, 0x2f, 0x8e, - 0x4c, 0x6d, 0x6b, 0x89, 0xbf, 0x13, 0x1c, 0x1e, 0x34, 0xc6, 0xc4, 0x6f, 0x94, 0xa8, 0xb5, 0xef, - 0xd5, 0x61, 0x4a, 0x7d, 0xa4, 0xdb, 0xd4, 0x0b, 0xe8, 0x5d, 0x72, 0x05, 0xca, 0x0e, 0xdb, 0x34, - 0xfc, 0x23, 0x37, 0x27, 0xe4, 0x9a, 0x2c, 0xf3, 0xcd, 0xc2, 0x7b, 0xd8, 0xcc, 0x84, 0xc0, 0x95, - 0x0c, 0x1f, 0x7d, 0x66, 0x2d, 0x8e, 0x46, 0xcc, 0x4c, 0xfc, 0x46, 0x89, 0x9a, 0xbc, 0x06, 0x65, - 0xfe, 0xf2, 0x82, 0xd5, 0x1f, 0x19, 0x9d, 0x04, 0x7b, 0xf5, 0x2a, 0x7b, 0x03, 0xfe, 0xe2, 0x1c, - 0x29, 0x5b, 0x8a, 0x7d, 0x73, 0x47, 0x32, 0xf6, 0xc3, 0x39, 0x18, 0xbb, 0x2c, 0x96, 0xe2, 0xd6, - 0xd2, 0x32, 0x32, 0x8c, 0xe4, 0x97, 0x0b, 0x70, 0xce, 0x70, 0x9d, 0x40, 0x67, 0x4a, 0x80, 0x12, - 0x7f, 0xd3, 0x15, 0x4e, 0xe7, 0xe5, 0x91, 0xe9, 0x2c, 0xa6, 0x31, 0x36, 0x1f, 0x63, 0xa7, 0xf9, - 0x40, 0x33, 0x0e, 0xd2, 0x26, 0xbf, 0x5a, 0x80, 0xc7, 0xd8, 0x29, 0x3b, 0x00, 0xcc, 0x65, 0xc3, - 0xc9, 0xce, 0xea, 0xe2, 0xe1, 0x41, 0xe3, 0xb1, 0xd5, 0x2c, 0x62, 0x98, 0x3d, 0x07, 0x36, 0xbb, - 0xf3, 0xfa, 0xa0, 0xc2, 0xc0, 0xe5, 0x4e, 0x7d, 0x7e, 0xed, 0x24, 0x95, 0x90, 0xe6, 0x13, 0x72, - 0x29, 0x67, 0xe9, 0x5c, 0x98, 0x35, 0x0b, 0x72, 0x0d, 0xc6, 0xf7, 0x5c, 0xbb, 0xdf, 0xa5, 0xfe, - 0x74, 0x95, 0x4b, 0xee, 0x99, 0xac, 0x03, 0xf5, 0x36, 0x07, 0x69, 0x9e, 0x91, 0xe8, 0xc7, 0xc5, - 0xb3, 0x8f, 0x6a, 0x2c, 0xb1, 0x60, 0xcc, 0xb6, 0xba, 0x56, 0xe0, 0x73, 0x91, 0x56, 0x9f, 0xbf, - 0x36, 0xf2, 0x6b, 0x89, 0x2d, 0xba, 
0xc6, 0x91, 0x89, 0x5d, 0x23, 0x7e, 0xa3, 0x24, 0xc0, 0x8e, - 0x42, 0xdf, 0xd0, 0x6d, 0x21, 0xf2, 0xea, 0xf3, 0x1f, 0x1d, 0x7d, 0xdb, 0x30, 0x2c, 0xcd, 0x49, - 0xf9, 0x4e, 0x15, 0xfe, 0x88, 0x02, 0x37, 0xf9, 0x24, 0x4c, 0x25, 0xbe, 0xa6, 0x3f, 0x5d, 0xe7, - 0xdc, 0x79, 0x32, 0x8b, 0x3b, 0x21, 0x54, 0xf3, 0x82, 0x44, 0x36, 0x95, 0x58, 0x21, 0x3e, 0xa6, - 0x90, 0x91, 0x1b, 0x50, 0xf5, 0x2d, 0x93, 0x1a, 0xba, 0xe7, 0x4f, 0x4f, 0x1c, 0x05, 0xf1, 0x59, - 0x89, 0xb8, 0xda, 0x92, 0xc3, 0x30, 0x44, 0x40, 0x66, 0x01, 0x7a, 0xba, 0x17, 0x58, 0x42, 0x85, - 0x9c, 0xe4, 0xea, 0xcc, 0xd4, 0xe1, 0x41, 0x03, 0x36, 0xc2, 0x56, 0x8c, 0x41, 0x30, 0x78, 0x36, - 0x76, 0xd5, 0xe9, 0xf5, 0x03, 0x7f, 0x7a, 0xea, 0x4a, 0xe9, 0x6a, 0x4d, 0xc0, 0xb7, 0xc2, 0x56, - 0x8c, 0x41, 0x90, 0x6f, 0x14, 0xe0, 0x89, 0xe8, 0x71, 0x70, 0x93, 0x9d, 0x39, 0xf1, 0x4d, 0xd6, - 0x38, 0x3c, 0x68, 0x3c, 0xd1, 0x1a, 0x4e, 0x12, 0xef, 0x37, 0x1f, 0xed, 0x15, 0x98, 0x5c, 0xe8, - 0x07, 0xbb, 0xae, 0x67, 0xbd, 0xc9, 0xd5, 0x61, 0xb2, 0x0c, 0x95, 0x80, 0xab, 0x35, 0x42, 0x2e, - 0x3f, 0x95, 0xc5, 0x6a, 0xa1, 0x62, 0xde, 0xa0, 0xfb, 0x4a, 0x1b, 0x10, 0xf2, 0x51, 0xa8, 0x39, - 0x62, 0xb8, 0xf6, 0x1b, 0x05, 0xa8, 0x35, 0x75, 0xdf, 0x32, 0x18, 0x7a, 0xb2, 0x08, 0xe5, 0xbe, - 0x4f, 0xbd, 0xe3, 0x21, 0xe5, 0xa7, 0xf4, 0x96, 0x4f, 0x3d, 0xe4, 0x83, 0xc9, 0x2d, 0xa8, 0xf6, - 0x74, 0xdf, 0xbf, 0xe3, 0x7a, 0xa6, 0x94, 0x34, 0x47, 0x44, 0x24, 0xf4, 0x55, 0x39, 0x14, 0x43, - 0x24, 0x5a, 0x1d, 0x22, 0x51, 0xab, 0xfd, 0xa0, 0x00, 0xe7, 0x9b, 0xfd, 0x9d, 0x1d, 0xea, 0x49, - 0xf5, 0x4c, 0x28, 0x3e, 0x84, 0x42, 0xc5, 0xa3, 0xa6, 0xe5, 0xcb, 0xb9, 0x2f, 0x8d, 0xfc, 0xe9, - 0x90, 0x61, 0x91, 0x7a, 0x16, 0xe7, 0x17, 0x6f, 0x40, 0x81, 0x9d, 0xf4, 0xa1, 0xf6, 0x06, 0x0d, - 0xfc, 0xc0, 0xa3, 0x7a, 0x57, 0xbe, 0xdd, 0xf5, 0x91, 0x49, 0xbd, 0x4c, 0x83, 0x16, 0xc7, 0x14, - 0x57, 0xeb, 0xc2, 0x46, 0x8c, 0x28, 0x69, 0x7f, 0x58, 0x81, 0x89, 0x45, 0xb7, 0xbb, 0x6d, 0x39, - 0xd4, 0xbc, 0x66, 0xb6, 0x29, 0x79, 0x1d, 0xca, 0xd4, 0x6c, 0x53, 0xf9, 
0xb6, 0xa3, 0xcb, 0x59, - 0x86, 0x2c, 0xd2, 0x16, 0xd8, 0x13, 0x72, 0xc4, 0x64, 0x0d, 0xa6, 0x76, 0x3c, 0xb7, 0x2b, 0x8e, - 0xae, 0xcd, 0xfd, 0x9e, 0x54, 0x15, 0x9b, 0xff, 0x43, 0x1d, 0x07, 0xcb, 0x89, 0xde, 0x7b, 0x07, - 0x0d, 0x88, 0x9e, 0x30, 0x35, 0x96, 0xbc, 0x0a, 0xd3, 0x51, 0x4b, 0xb8, 0x87, 0x17, 0x99, 0x5e, - 0xcd, 0x55, 0x85, 0x4a, 0xf3, 0xd2, 0xe1, 0x41, 0x63, 0x7a, 0x79, 0x08, 0x0c, 0x0e, 0x1d, 0x4d, - 0xde, 0x2a, 0xc0, 0xd9, 0xa8, 0x53, 0x9c, 0xab, 0x52, 0x43, 0x38, 0xa1, 0x03, 0x9b, 0x1b, 0x20, - 0xcb, 0x29, 0x12, 0x38, 0x40, 0x94, 0x2c, 0xc3, 0x44, 0xe0, 0xc6, 0xf8, 0x55, 0xe1, 0xfc, 0xd2, - 0x94, 0xc5, 0xbc, 0xe9, 0x0e, 0xe5, 0x56, 0x62, 0x1c, 0x41, 0xb8, 0xa0, 0x9e, 0x53, 0x9c, 0x1a, - 0xe3, 0x9c, 0x9a, 0x39, 0x3c, 0x68, 0x5c, 0xd8, 0xcc, 0x84, 0xc0, 0x21, 0x23, 0xc9, 0x17, 0x0a, - 0x30, 0xa5, 0xba, 0x24, 0x8f, 0xc6, 0x4f, 0x92, 0x47, 0x84, 0xad, 0x88, 0xcd, 0x04, 0x01, 0x4c, - 0x11, 0xd4, 0x7e, 0x58, 0x86, 0x5a, 0x78, 0xb2, 0x91, 0x77, 0x43, 0x85, 0xdb, 0xc2, 0x52, 0x61, - 0x0d, 0x45, 0x16, 0x37, 0x99, 0x51, 0xf4, 0x91, 0xa7, 0x60, 0xdc, 0x70, 0xbb, 0x5d, 0xdd, 0x31, - 0xb9, 0x7f, 0xa3, 0xd6, 0xac, 0x33, 0x49, 0xbd, 0x28, 0x9a, 0x50, 0xf5, 0x91, 0x4b, 0x50, 0xd6, - 0xbd, 0xb6, 0x70, 0x35, 0xd4, 0xc4, 0x79, 0xb4, 0xe0, 0xb5, 0x7d, 0xe4, 0xad, 0xe4, 0x83, 0x50, - 0xa2, 0xce, 0xde, 0x74, 0x79, 0xb8, 0x2a, 0x70, 0xcd, 0xd9, 0xbb, 0xad, 0x7b, 0xcd, 0xba, 0x9c, - 0x43, 0xe9, 0x9a, 0xb3, 0x87, 0x6c, 0x0c, 0x59, 0x83, 0x71, 0xea, 0xec, 0xb1, 0x6f, 0x2f, 0x7d, - 0x00, 0xef, 0x1a, 0x32, 0x9c, 0x81, 0x48, 0xad, 0x38, 0x54, 0x28, 0x64, 0x33, 0x2a, 0x14, 0xe4, - 0xe3, 0x30, 0x21, 0x74, 0x8b, 0x75, 0xf6, 0x4d, 0xfc, 0xe9, 0x31, 0x8e, 0xb2, 0x31, 0x5c, 0x39, - 0xe1, 0x70, 0x91, 0xcf, 0x25, 0xd6, 0xe8, 0x63, 0x02, 0x15, 0xf9, 0x38, 0xd4, 0x94, 0x3b, 0x4d, - 0x7d, 0xd9, 0x4c, 0x77, 0x05, 0x4a, 0x20, 0xa4, 0x9f, 0xee, 0x5b, 0x1e, 0xed, 0x52, 0x27, 0xf0, - 0x9b, 0xe7, 0x94, 0x01, 0xab, 0x7a, 0x7d, 0x8c, 0xb0, 0x91, 0xed, 0x41, 0xbf, 0x8b, 0x70, 0x1a, - 0xbc, 0x7b, 
0xc8, 0xa9, 0x3e, 0x82, 0xd3, 0xe5, 0x53, 0x70, 0x26, 0x74, 0x8c, 0x48, 0xdb, 0x5a, - 0xb8, 0x11, 0x9e, 0x65, 0xc3, 0x57, 0x93, 0x5d, 0xf7, 0x0e, 0x1a, 0x4f, 0x66, 0x58, 0xd7, 0x11, - 0x00, 0xa6, 0x91, 0x69, 0xbf, 0x5f, 0x82, 0x41, 0xb5, 0x3b, 0xc9, 0xb4, 0xc2, 0x49, 0x33, 0x2d, - 0xfd, 0x42, 0xe2, 0xf8, 0x7c, 0x41, 0x0e, 0xcb, 0xff, 0x52, 0x59, 0x1f, 0xa6, 0x74, 0xd2, 0x1f, - 0xe6, 0x61, 0xd9, 0x3b, 0xda, 0xdb, 0x65, 0x98, 0x5a, 0xd2, 0x69, 0xd7, 0x75, 0x1e, 0x68, 0x84, - 0x14, 0x1e, 0x0a, 0x23, 0xe4, 0x2a, 0x54, 0x3d, 0xda, 0xb3, 0x2d, 0x43, 0xf7, 0xf9, 0xa7, 0x97, - 0xee, 0x38, 0x94, 0x6d, 0x18, 0xf6, 0x0e, 0x31, 0x3e, 0x4b, 0x0f, 0xa5, 0xf1, 0x59, 0xfe, 0xc9, - 0x1b, 0x9f, 0xda, 0x17, 0x8a, 0xc0, 0x15, 0x15, 0x72, 0x05, 0xca, 0x4c, 0x08, 0xa7, 0x5d, 0x1e, - 0x7c, 0xe1, 0xf0, 0x1e, 0x32, 0x03, 0xc5, 0xc0, 0x95, 0x3b, 0x0f, 0x64, 0x7f, 0x71, 0xd3, 0xc5, - 0x62, 0xe0, 0x92, 0x37, 0x01, 0x0c, 0xd7, 0x31, 0x2d, 0xe5, 0xa5, 0xce, 0xf7, 0x62, 0xcb, 0xae, - 0x77, 0x47, 0xf7, 0xcc, 0xc5, 0x10, 0xa3, 0x30, 0x3f, 0xa2, 0x67, 0x8c, 0x51, 0x23, 0x2f, 0xc2, - 0x98, 0xeb, 0x2c, 0xf7, 0x6d, 0x9b, 0x33, 0xb4, 0xd6, 0xfc, 0x9f, 0xcc, 0x26, 0xbc, 0xc5, 0x5b, - 0xee, 0x1d, 0x34, 0x2e, 0x0a, 0xfd, 0x96, 0x3d, 0xbd, 0xe2, 0x59, 0x81, 0xe5, 0xb4, 0x5b, 0x81, - 0xa7, 0x07, 0xb4, 0xbd, 0x8f, 0x72, 0x98, 0xf6, 0xe5, 0x02, 0xd4, 0x97, 0xad, 0xbb, 0xd4, 0x7c, - 0xc5, 0x72, 0x4c, 0xf7, 0x0e, 0x41, 0x18, 0xb3, 0xa9, 0xd3, 0x0e, 0x76, 0xe5, 0xea, 0x9f, 0x8d, - 0xed, 0xb5, 0x30, 0xb8, 0x11, 0xcd, 0xbf, 0x4b, 0x03, 0x9d, 0xed, 0xbe, 0xa5, 0xbe, 0x74, 0xbf, - 0x0b, 0xa3, 0x94, 0x63, 0x40, 0x89, 0x89, 0xcc, 0x41, 0x4d, 0x68, 0x9f, 0x96, 0xd3, 0xe6, 0x3c, - 0xac, 0x46, 0x87, 0x5e, 0x4b, 0x75, 0x60, 0x04, 0xa3, 0xed, 0xc3, 0xb9, 0x01, 0x36, 0x10, 0x13, - 0xca, 0x81, 0xde, 0x56, 0xe7, 0xeb, 0xf2, 0xc8, 0x0c, 0xde, 0xd4, 0xdb, 0x31, 0xe6, 0x72, 0x19, - 0xbf, 0xa9, 0x33, 0x19, 0xcf, 0xb0, 0x6b, 0x3f, 0x2a, 0x40, 0x75, 0xb9, 0xef, 0x18, 0xdc, 0x36, - 0x7a, 0xb0, 0x2b, 0x4c, 0x29, 0x0c, 0xc5, 0x4c, 
0x85, 0xa1, 0x0f, 0x63, 0x9d, 0x3b, 0xa1, 0x42, - 0x51, 0x9f, 0x5f, 0x1f, 0x7d, 0x55, 0xc8, 0x29, 0xcd, 0xde, 0xe0, 0xf8, 0x44, 0x0c, 0x65, 0x4a, - 0x4e, 0x68, 0xec, 0xc6, 0x2b, 0x9c, 0xa8, 0x24, 0x36, 0xf3, 0x41, 0xa8, 0xc7, 0xc0, 0x8e, 0xe5, - 0xb4, 0xfd, 0x9d, 0x32, 0x8c, 0xad, 0xb4, 0x5a, 0x0b, 0x1b, 0xab, 0xe4, 0x39, 0xa8, 0x4b, 0xf7, - 0xfa, 0xcd, 0x88, 0x07, 0x61, 0x74, 0xa5, 0x15, 0x75, 0x61, 0x1c, 0x8e, 0xa9, 0x63, 0x1e, 0xd5, - 0xed, 0xae, 0xdc, 0x2c, 0xa1, 0x3a, 0x86, 0xac, 0x11, 0x45, 0x1f, 0xd1, 0x61, 0x8a, 0x59, 0x78, - 0x8c, 0x85, 0xc2, 0x7a, 0x93, 0xdb, 0xe6, 0x88, 0xf6, 0x1d, 0x57, 0x12, 0xb7, 0x12, 0x08, 0x30, - 0x85, 0x90, 0xbc, 0x00, 0x55, 0xbd, 0x1f, 0xec, 0x72, 0x05, 0x5a, 0xec, 0x8d, 0x4b, 0x3c, 0xfa, - 0x20, 0xdb, 0xee, 0x1d, 0x34, 0x26, 0x6e, 0x60, 0xf3, 0x39, 0xf5, 0x8c, 0x21, 0x34, 0x9b, 0x9c, - 0xb2, 0x18, 0xe5, 0xe4, 0x2a, 0xc7, 0x9e, 0xdc, 0x46, 0x02, 0x01, 0xa6, 0x10, 0x92, 0xd7, 0x60, - 0xa2, 0x43, 0xf7, 0x03, 0x7d, 0x5b, 0x12, 0x18, 0x3b, 0x0e, 0x81, 0xb3, 0x4c, 0x85, 0xbb, 0x11, - 0x1b, 0x8e, 0x09, 0x64, 0xc4, 0x87, 0x47, 0x3b, 0xd4, 0xdb, 0xa6, 0x9e, 0x2b, 0xad, 0x4f, 0x49, - 0x64, 0xfc, 0x38, 0x44, 0xa6, 0x0f, 0x0f, 0x1a, 0x8f, 0xde, 0xc8, 0x40, 0x83, 0x99, 0xc8, 0xb5, - 0xff, 0x28, 0xc2, 0x99, 0x15, 0x11, 0xdf, 0x74, 0x3d, 0x21, 0x84, 0xc9, 0x45, 0x28, 0x79, 0xbd, - 0x3e, 0x5f, 0x39, 0x25, 0xe1, 0x27, 0xc5, 0x8d, 0x2d, 0x64, 0x6d, 0xe4, 0x55, 0xa8, 0x9a, 0xf2, - 0xc8, 0x90, 0xc6, 0xef, 0x71, 0x0f, 0x1a, 0x2e, 0x04, 0xd5, 0x13, 0x86, 0xd8, 0x98, 0xa6, 0xdf, - 0xf5, 0xdb, 0x2d, 0xeb, 0x4d, 0x2a, 0xed, 0x41, 0xae, 0xe9, 0xaf, 0x8b, 0x26, 0x54, 0x7d, 0x4c, - 0xaa, 0x76, 0xe8, 0xbe, 0xb0, 0x86, 0xca, 0x91, 0x54, 0xbd, 0x21, 0xdb, 0x30, 0xec, 0x25, 0x0d, - 0xb5, 0x59, 0xd8, 0x2a, 0x28, 0x0b, 0x4b, 0xfe, 0x36, 0x6b, 0x90, 0xfb, 0x86, 0x1d, 0x99, 0x6f, - 0x58, 0x41, 0x40, 0x3d, 0xf9, 0x19, 0x47, 0x3a, 0x32, 0x5f, 0xe6, 0x18, 0x50, 0x62, 0x22, 0xef, - 0x85, 0x1a, 0x47, 0xde, 0xb4, 0xdd, 0x6d, 0xfe, 0xe1, 0x6a, 0xc2, 0xa6, 0xbf, 0xad, 
0x1a, 0x31, - 0xea, 0xd7, 0x7e, 0x5c, 0x84, 0x0b, 0x2b, 0x34, 0x10, 0x5a, 0xcd, 0x12, 0xed, 0xd9, 0xee, 0x3e, - 0x53, 0x2d, 0x91, 0x7e, 0x9a, 0xbc, 0x04, 0x60, 0xf9, 0xdb, 0xad, 0x3d, 0x83, 0xef, 0x03, 0xb1, - 0x87, 0xaf, 0xc8, 0x2d, 0x09, 0xab, 0xad, 0xa6, 0xec, 0xb9, 0x97, 0x78, 0xc2, 0xd8, 0x98, 0xc8, - 0xbc, 0x2a, 0xde, 0xc7, 0xbc, 0x6a, 0x01, 0xf4, 0x22, 0x05, 0xb5, 0xc4, 0x21, 0xff, 0x8f, 0x22, - 0x73, 0x1c, 0xdd, 0x34, 0x86, 0x26, 0x8f, 0xca, 0xe8, 0xc0, 0x59, 0x93, 0xee, 0xe8, 0x7d, 0x3b, - 0x08, 0x95, 0x6a, 0xb9, 0x89, 0x8f, 0xae, 0x97, 0x87, 0xb1, 0xd7, 0xa5, 0x14, 0x26, 0x1c, 0xc0, - 0xad, 0xfd, 0x6e, 0x09, 0x66, 0x56, 0x68, 0x10, 0x7a, 0x5c, 0xe4, 0xe9, 0xd8, 0xea, 0x51, 0x83, - 0x7d, 0x85, 0xb7, 0x0a, 0x30, 0x66, 0xeb, 0xdb, 0xd4, 0x66, 0xd2, 0x8b, 0xbd, 0xcd, 0xeb, 0x23, - 0x0b, 0x82, 0xe1, 0x54, 0x66, 0xd7, 0x38, 0x85, 0x94, 0x68, 0x10, 0x8d, 0x28, 0xc9, 0xb3, 0x43, - 0xdd, 0xb0, 0xfb, 0x7e, 0x40, 0xbd, 0x0d, 0xd7, 0x0b, 0xa4, 0x3e, 0x19, 0x1e, 0xea, 0x8b, 0x51, - 0x17, 0xc6, 0xe1, 0xc8, 0x3c, 0x80, 0x61, 0x5b, 0xd4, 0x09, 0xf8, 0x28, 0xb1, 0xaf, 0x88, 0xfa, - 0xbe, 0x8b, 0x61, 0x0f, 0xc6, 0xa0, 0x18, 0xa9, 0xae, 0xeb, 0x58, 0x81, 0x2b, 0x48, 0x95, 0x93, - 0xa4, 0xd6, 0xa3, 0x2e, 0x8c, 0xc3, 0xf1, 0x61, 0x34, 0xf0, 0x2c, 0xc3, 0xe7, 0xc3, 0x2a, 0xa9, - 0x61, 0x51, 0x17, 0xc6, 0xe1, 0x98, 0xcc, 0x8b, 0xbd, 0xff, 0xb1, 0x64, 0xde, 0xd7, 0x6b, 0x70, - 0x39, 0xc1, 0xd6, 0x40, 0x0f, 0xe8, 0x4e, 0xdf, 0x6e, 0xd1, 0x40, 0x7d, 0xc0, 0x11, 0x65, 0xe1, - 0x2f, 0x44, 0xdf, 0x5d, 0x64, 0x55, 0x18, 0x27, 0xf3, 0xdd, 0x07, 0x26, 0x78, 0xa4, 0x6f, 0x3f, - 0x07, 0x35, 0x47, 0x0f, 0x7c, 0xbe, 0x71, 0xe5, 0x1e, 0x0d, 0xd5, 0xb0, 0x9b, 0xaa, 0x03, 0x23, - 0x18, 0xb2, 0x01, 0x8f, 0x4a, 0x16, 0x5f, 0xbb, 0xdb, 0x73, 0xbd, 0x80, 0x7a, 0x62, 0xac, 0x14, - 0xa7, 0x72, 0xec, 0xa3, 0xeb, 0x19, 0x30, 0x98, 0x39, 0x92, 0xac, 0xc3, 0x79, 0x43, 0x44, 0x9a, - 0xa9, 0xed, 0xea, 0xa6, 0x42, 0x28, 0x1c, 0x5c, 0xa1, 0x69, 0xb4, 0x38, 0x08, 0x82, 0x59, 0xe3, - 0xd2, 0xab, 0x79, 0x6c, 
0xa4, 0xd5, 0x3c, 0x3e, 0xca, 0x6a, 0xae, 0x8e, 0xb6, 0x9a, 0x6b, 0x47, - 0x5b, 0xcd, 0x8c, 0xf3, 0x6c, 0x1d, 0x51, 0x8f, 0xa9, 0x27, 0x42, 0xc2, 0xc6, 0x12, 0x19, 0x42, - 0xce, 0xb7, 0x32, 0x60, 0x30, 0x73, 0x24, 0xd9, 0x86, 0x19, 0xd1, 0x7e, 0xcd, 0x31, 0xbc, 0xfd, - 0x1e, 0x13, 0x3c, 0x31, 0xbc, 0xf5, 0x84, 0x87, 0x71, 0xa6, 0x35, 0x14, 0x12, 0xef, 0x83, 0x85, - 0xfc, 0x5f, 0x98, 0x14, 0x5f, 0x69, 0x5d, 0xef, 0x71, 0xb4, 0x22, 0xad, 0xe1, 0x31, 0x89, 0x76, - 0x72, 0x31, 0xde, 0x89, 0x49, 0x58, 0xb2, 0x00, 0x67, 0x7a, 0x7b, 0x06, 0xfb, 0xb9, 0xba, 0x73, - 0x93, 0x52, 0x93, 0x9a, 0x3c, 0x5a, 0x53, 0x6b, 0x3e, 0xae, 0x1c, 0x1d, 0x1b, 0xc9, 0x6e, 0x4c, - 0xc3, 0x93, 0x17, 0x60, 0xc2, 0x0f, 0x74, 0x2f, 0x90, 0x6e, 0xbd, 0xe9, 0x29, 0x91, 0xf6, 0xa1, - 0xbc, 0x5e, 0xad, 0x58, 0x1f, 0x26, 0x20, 0x33, 0xe5, 0xc5, 0x99, 0xd3, 0x93, 0x17, 0x79, 0x4e, - 0xab, 0x7b, 0x42, 0xd8, 0xf3, 0x58, 0x42, 0x4a, 0xcc, 0x7c, 0x29, 0x2d, 0x66, 0x5e, 0xcb, 0x73, - 0xdc, 0x64, 0x50, 0x38, 0xd2, 0x31, 0xf3, 0x32, 0x10, 0x4f, 0x46, 0x3e, 0x84, 0xbd, 0x1d, 0x93, - 0x34, 0x61, 0x32, 0x0f, 0x0e, 0x40, 0x60, 0xc6, 0x28, 0xd2, 0x82, 0xc7, 0x7c, 0xea, 0x04, 0x96, - 0x43, 0xed, 0x24, 0x3a, 0x21, 0x82, 0x9e, 0x94, 0xe8, 0x1e, 0x6b, 0x65, 0x01, 0x61, 0xf6, 0xd8, - 0x3c, 0xcc, 0xff, 0x33, 0xe0, 0x72, 0x5e, 0xb0, 0xe6, 0xc4, 0xc4, 0xc4, 0x5b, 0x69, 0x31, 0xf1, - 0x7a, 0xfe, 0xef, 0x36, 0x9a, 0x88, 0x98, 0x07, 0xe0, 0x5f, 0x21, 0x2e, 0x23, 0xc2, 0x93, 0x11, - 0xc3, 0x1e, 0x8c, 0x41, 0xb1, 0x5d, 0xaf, 0xf8, 0x1c, 0x17, 0x0f, 0xe1, 0xae, 0x6f, 0xc5, 0x3b, - 0x31, 0x09, 0x3b, 0x54, 0xc4, 0x54, 0x46, 0x16, 0x31, 0x2f, 0x03, 0x49, 0x78, 0x7b, 0x04, 0xbe, - 0xb1, 0x64, 0x2e, 0xd9, 0xea, 0x00, 0x04, 0x66, 0x8c, 0x1a, 0xb2, 0x94, 0xc7, 0x4f, 0x76, 0x29, - 0x57, 0x47, 0x5f, 0xca, 0xe4, 0x75, 0xb8, 0xc8, 0x49, 0x49, 0xfe, 0x24, 0x11, 0x0b, 0x61, 0xf3, - 0x2e, 0x89, 0xf8, 0x22, 0x0e, 0x03, 0xc4, 0xe1, 0x38, 0xd8, 0xf7, 0x31, 0x3c, 0x6a, 0x32, 0xe2, - 0xba, 0x3d, 0x5c, 0x10, 0x2d, 0x66, 0xc0, 0x60, 0xe6, 0x48, 
0xb6, 0xc4, 0x02, 0xb6, 0x0c, 0xf5, - 0x6d, 0x9b, 0x9a, 0x32, 0x97, 0x2e, 0x5c, 0x62, 0x9b, 0x6b, 0x2d, 0xd9, 0x83, 0x31, 0xa8, 0x2c, - 0xd9, 0x30, 0x71, 0x4c, 0xd9, 0xb0, 0xc2, 0x5d, 0xa3, 0x3b, 0x09, 0x11, 0x24, 0x05, 0x4c, 0x98, - 0x1d, 0xb9, 0x98, 0x06, 0xc0, 0xc1, 0x31, 0x5c, 0x34, 0x1b, 0x9e, 0xd5, 0x0b, 0xfc, 0x24, 0xae, - 0xa9, 0x94, 0x68, 0xce, 0x80, 0xc1, 0xcc, 0x91, 0x4c, 0x29, 0xda, 0xa5, 0xba, 0x1d, 0xec, 0x26, - 0x11, 0x9e, 0x49, 0x2a, 0x45, 0xd7, 0x07, 0x41, 0x30, 0x6b, 0x5c, 0xa6, 0x2c, 0x3b, 0xfb, 0x70, - 0xca, 0xb2, 0x2f, 0x96, 0xe0, 0xe2, 0x0a, 0x0d, 0xc2, 0x64, 0x86, 0x9f, 0xd9, 0xae, 0x3f, 0x01, - 0xdb, 0xf5, 0x6b, 0x15, 0x38, 0xbf, 0x42, 0x65, 0xf6, 0xdf, 0x86, 0x6b, 0x2a, 0x61, 0xf6, 0xdf, - 0x94, 0xfd, 0xeb, 0x70, 0x3e, 0xca, 0x9f, 0x69, 0x05, 0xae, 0x27, 0x64, 0x79, 0xca, 0x44, 0x69, - 0x0d, 0x82, 0x60, 0xd6, 0x38, 0xf2, 0x71, 0x78, 0x9c, 0x8b, 0x7a, 0xa7, 0x2d, 0x9c, 0x62, 0xc2, - 0x82, 0x8b, 0xe5, 0x66, 0x37, 0x24, 0xca, 0xc7, 0x5b, 0xd9, 0x60, 0x38, 0x6c, 0x3c, 0xf9, 0x1c, - 0x4c, 0xf4, 0xac, 0x1e, 0xb5, 0x2d, 0x87, 0xeb, 0x67, 0xb9, 0xe3, 0xf0, 0x1b, 0x31, 0x64, 0x91, - 0xd6, 0x1c, 0x6f, 0xc5, 0x04, 0xc1, 0xcc, 0x95, 0x5a, 0x3d, 0xc5, 0x95, 0xfa, 0xaf, 0x45, 0x18, - 0x5f, 0xf1, 0xdc, 0x7e, 0xaf, 0xb9, 0x4f, 0xda, 0x30, 0x76, 0x87, 0x47, 0x2c, 0x64, 0x3c, 0x60, - 0xf4, 0x1c, 0x54, 0x11, 0xf8, 0x88, 0x54, 0x22, 0xf1, 0x8c, 0x12, 0x3d, 0x5b, 0xc4, 0x1d, 0xba, - 0x4f, 0x4d, 0x19, 0xb8, 0x08, 0x17, 0xf1, 0x0d, 0xd6, 0x88, 0xa2, 0x8f, 0x74, 0xe1, 0x8c, 0x6e, - 0xdb, 0xee, 0x1d, 0x6a, 0xae, 0xe9, 0x01, 0x75, 0xa8, 0xaf, 0xe2, 0x40, 0xc7, 0xf5, 0x05, 0xf2, - 0x60, 0xea, 0x42, 0x12, 0x15, 0xa6, 0x71, 0x93, 0x37, 0x60, 0xdc, 0x0f, 0x5c, 0x4f, 0x29, 0x5b, - 0xf5, 0xf9, 0xc5, 0xd1, 0x3f, 0x7a, 0xf3, 0x63, 0x2d, 0x81, 0x4a, 0x38, 0x4a, 0xe5, 0x03, 0x2a, - 0x02, 0xda, 0x57, 0x0b, 0x00, 0xd7, 0x37, 0x37, 0x37, 0xa4, 0x4f, 0xd7, 0x84, 0xb2, 0xde, 0x0f, - 0xa3, 0x43, 0xa3, 0x47, 0x61, 0x12, 0x49, 0x68, 0x32, 0x70, 0xd2, 0x0f, 0x76, 0x91, 0x63, 0x27, - 
0xef, 0x81, 0x71, 0xa9, 0x20, 0x4b, 0xb6, 0x87, 0xf1, 0x5c, 0xa9, 0x44, 0xa3, 0xea, 0xd7, 0x7e, - 0xbb, 0x08, 0xb0, 0x6a, 0xda, 0xb4, 0xa5, 0xd2, 0x86, 0x6b, 0xc1, 0xae, 0x47, 0xfd, 0x5d, 0xd7, - 0x36, 0x47, 0x0c, 0x61, 0x71, 0x47, 0xeb, 0xa6, 0x42, 0x82, 0x11, 0x3e, 0x62, 0x32, 0x03, 0x93, - 0xf6, 0x56, 0x9d, 0x80, 0x7a, 0x7b, 0xba, 0x3d, 0xa2, 0xe7, 0xfa, 0xac, 0x30, 0x46, 0x23, 0x3c, - 0x98, 0xc0, 0x4a, 0x74, 0xa8, 0x5b, 0x8e, 0x21, 0x36, 0x48, 0x73, 0x7f, 0xc4, 0x85, 0x74, 0x86, - 0x59, 0x1c, 0xab, 0x11, 0x1a, 0x8c, 0xe3, 0xd4, 0xbe, 0x5f, 0x84, 0x0b, 0x9c, 0x1e, 0x9b, 0x46, - 0x22, 0x09, 0x8e, 0xfc, 0xff, 0x81, 0xcb, 0x47, 0xff, 0xfb, 0x68, 0xa4, 0xc5, 0xdd, 0x95, 0x75, - 0x1a, 0xe8, 0x91, 0x3e, 0x17, 0xb5, 0xc5, 0x6e, 0x1c, 0xf5, 0xa1, 0xec, 0xb3, 0xf3, 0x4a, 0x70, - 0xaf, 0x35, 0xf2, 0x12, 0xca, 0x7e, 0x01, 0x7e, 0x7a, 0x85, 0xa1, 0x3a, 0x7e, 0x6a, 0x71, 0x72, - 0xe4, 0xb3, 0x30, 0xe6, 0x07, 0x7a, 0xd0, 0x57, 0x5b, 0x73, 0xeb, 0xa4, 0x09, 0x73, 0xe4, 0xd1, - 0x39, 0x22, 0x9e, 0x51, 0x12, 0xd5, 0xbe, 0x5f, 0x80, 0x99, 0xec, 0x81, 0x6b, 0x96, 0x1f, 0x90, - 0xff, 0x37, 0xc0, 0xf6, 0x23, 0x7e, 0x71, 0x36, 0x9a, 0x33, 0x3d, 0xcc, 0x82, 0x55, 0x2d, 0x31, - 0x96, 0x07, 0x50, 0xb1, 0x02, 0xda, 0x55, 0xf6, 0xe5, 0xad, 0x13, 0x7e, 0xf5, 0x98, 0x68, 0x67, - 0x54, 0x50, 0x10, 0xd3, 0xde, 0x2e, 0x0e, 0x7b, 0x65, 0x2e, 0x3e, 0xec, 0x64, 0xa2, 0xe5, 0x8d, - 0x7c, 0x89, 0x96, 0xc9, 0x09, 0x0d, 0xe6, 0x5b, 0xfe, 0xdc, 0x60, 0xbe, 0xe5, 0xad, 0xfc, 0xf9, - 0x96, 0x29, 0x36, 0x0c, 0x4d, 0xbb, 0xfc, 0x4e, 0x09, 0x2e, 0xdd, 0x6f, 0xd9, 0x30, 0x79, 0x26, - 0x57, 0x67, 0x5e, 0x79, 0x76, 0xff, 0x75, 0x48, 0xe6, 0xa1, 0xd2, 0xdb, 0xd5, 0x7d, 0xa5, 0x94, - 0x29, 0x83, 0xa5, 0xb2, 0xc1, 0x1a, 0xef, 0xb1, 0x43, 0x83, 0x2b, 0x73, 0xfc, 0x11, 0x05, 0x28, - 0x3b, 0x8e, 0xbb, 0xd4, 0xf7, 0x23, 0x9f, 0x40, 0x78, 0x1c, 0xaf, 0x8b, 0x66, 0x54, 0xfd, 0x24, - 0x80, 0x31, 0xe1, 0xd7, 0x93, 0x92, 0x69, 0xf4, 0xec, 0x99, 0x8c, 0xdc, 0xdc, 0xe8, 0xa5, 0xa4, - 0x8b, 0x58, 0xd2, 0x22, 0xb3, 0x50, 
0x0e, 0xa2, 0x4c, 0x49, 0x65, 0x9a, 0x97, 0x33, 0xf4, 0x53, - 0x0e, 0xc7, 0x0c, 0x7b, 0x77, 0x9b, 0x7b, 0x32, 0x4d, 0x19, 0xb4, 0xb4, 0x5c, 0x87, 0x2b, 0x64, - 0xa5, 0xc8, 0xb0, 0xbf, 0x35, 0x00, 0x81, 0x19, 0xa3, 0xb4, 0xbf, 0xac, 0xc2, 0x85, 0xec, 0xf5, - 0xc0, 0xf8, 0xb6, 0x47, 0x3d, 0x9f, 0xe1, 0x2e, 0x24, 0xf9, 0x76, 0x5b, 0x34, 0xa3, 0xea, 0xff, - 0xa9, 0xce, 0xf2, 0xf9, 0x5a, 0x01, 0x2e, 0x7a, 0xd2, 0x31, 0xff, 0x4e, 0x64, 0xfa, 0x3c, 0x29, - 0xdc, 0x19, 0x43, 0x08, 0xe2, 0xf0, 0xb9, 0x90, 0xdf, 0x2a, 0xc0, 0x74, 0x37, 0xe5, 0xe7, 0x38, - 0xc5, 0x5b, 0x3a, 0x3c, 0x15, 0x79, 0x7d, 0x08, 0x3d, 0x1c, 0x3a, 0x13, 0xf2, 0x39, 0xa8, 0xf7, - 0xd8, 0xba, 0xf0, 0x03, 0xea, 0x18, 0xea, 0xa2, 0xce, 0xe8, 0x3b, 0x69, 0x23, 0xc2, 0xa5, 0xf2, - 0x7f, 0x84, 0x7e, 0x10, 0xeb, 0xc0, 0x38, 0xc5, 0x87, 0xfc, 0x5a, 0xce, 0x55, 0xa8, 0xfa, 0x34, - 0x08, 0x2c, 0xa7, 0x2d, 0xec, 0x8d, 0x9a, 0xd8, 0x2b, 0x2d, 0xd9, 0x86, 0x61, 0x2f, 0x79, 0x2f, - 0xd4, 0xb8, 0x9f, 0x7f, 0xc1, 0x6b, 0xfb, 0xd3, 0x35, 0x9e, 0xa3, 0x33, 0x29, 0xb2, 0x8e, 0x64, - 0x23, 0x46, 0xfd, 0xe4, 0x59, 0x98, 0xd8, 0xe6, 0xdb, 0x57, 0xde, 0xa1, 0x14, 0x3e, 0x2e, 0xae, - 0xad, 0x35, 0x63, 0xed, 0x98, 0x80, 0x22, 0xf3, 0x00, 0x34, 0x0c, 0x86, 0xa4, 0xfd, 0x59, 0x51, - 0x98, 0x04, 0x63, 0x50, 0xe4, 0x49, 0x28, 0x05, 0xb6, 0xcf, 0x7d, 0x58, 0xd5, 0xc8, 0x04, 0xdd, - 0x5c, 0x6b, 0x21, 0x6b, 0xd7, 0x7e, 0x5c, 0x80, 0x33, 0xa9, 0x8c, 0x7e, 0x36, 0xa4, 0xef, 0xd9, - 0xf2, 0x18, 0x09, 0x87, 0x6c, 0xe1, 0x1a, 0xb2, 0x76, 0xf2, 0xba, 0x54, 0xcb, 0x8b, 0x39, 0xaf, - 0x8b, 0xdf, 0xd4, 0x03, 0x9f, 0xe9, 0xe1, 0x03, 0x1a, 0x39, 0x8f, 0xad, 0x44, 0xf3, 0x91, 0x72, - 0x20, 0x16, 0x5b, 0x89, 0xfa, 0x30, 0x01, 0x99, 0x72, 0xf8, 0x95, 0x8f, 0xe2, 0xf0, 0xd3, 0xbe, - 0x5c, 0x8c, 0x71, 0x40, 0x6a, 0xf6, 0x0f, 0xe0, 0xc0, 0xd3, 0x4c, 0x80, 0x86, 0xc2, 0xbd, 0x16, - 0x97, 0x7f, 0x5c, 0x18, 0xcb, 0x5e, 0xf2, 0x8a, 0xe0, 0x7d, 0x29, 0xe7, 0xd5, 0xbf, 0xcd, 0xb5, - 0x96, 0x48, 0x69, 0x51, 0x5f, 0x2d, 0xfc, 0x04, 0xe5, 0x53, 0xfa, 0x04, 
0xda, 0x9f, 0x96, 0xa0, - 0xfe, 0xb2, 0xbb, 0xfd, 0x53, 0x92, 0xb6, 0x9a, 0x2d, 0xa6, 0x8a, 0x3f, 0x41, 0x31, 0xb5, 0x05, - 0x8f, 0x07, 0x81, 0xdd, 0xa2, 0x86, 0xeb, 0x98, 0xfe, 0xc2, 0x4e, 0x40, 0xbd, 0x65, 0xcb, 0xb1, - 0xfc, 0x5d, 0x6a, 0xca, 0x70, 0xd2, 0x13, 0x87, 0x07, 0x8d, 0xc7, 0x37, 0x37, 0xd7, 0xb2, 0x40, - 0x70, 0xd8, 0x58, 0x7e, 0x6c, 0xe8, 0x46, 0xc7, 0xdd, 0xd9, 0xe1, 0xd7, 0x13, 0x64, 0xa2, 0x83, - 0x38, 0x36, 0x62, 0xed, 0x98, 0x80, 0xd2, 0xbe, 0x59, 0x84, 0x5a, 0x78, 0xdd, 0x98, 0x3c, 0x05, - 0xe3, 0xdb, 0x9e, 0xdb, 0xa1, 0x9e, 0x88, 0xdc, 0xc9, 0xeb, 0x09, 0x4d, 0xd1, 0x84, 0xaa, 0x8f, - 0xbc, 0x1b, 0x2a, 0x81, 0xdb, 0xb3, 0x8c, 0xb4, 0x43, 0x6d, 0x93, 0x35, 0xa2, 0xe8, 0x3b, 0xbd, - 0x05, 0xfe, 0x74, 0x42, 0xb5, 0xab, 0x0d, 0x55, 0xc6, 0x5e, 0x83, 0xb2, 0xaf, 0xfb, 0xb6, 0x94, - 0xa7, 0x39, 0x6e, 0xee, 0x2e, 0xb4, 0xd6, 0xe4, 0xcd, 0xdd, 0x85, 0xd6, 0x1a, 0x72, 0xa4, 0xda, - 0x0f, 0x8b, 0x50, 0x17, 0x7c, 0x13, 0xa7, 0xc2, 0x49, 0x72, 0xee, 0x45, 0x1e, 0xbf, 0xf6, 0xfb, - 0x5d, 0xea, 0x71, 0x37, 0x93, 0x3c, 0xe4, 0xe2, 0xf1, 0x81, 0xa8, 0x33, 0x8c, 0x61, 0x47, 0x4d, - 0x8a, 0xf5, 0xe5, 0x53, 0x64, 0x7d, 0xe5, 0x48, 0xac, 0x1f, 0x3b, 0x0d, 0xd6, 0xbf, 0x55, 0x84, - 0xda, 0x9a, 0xb5, 0x43, 0x8d, 0x7d, 0xc3, 0xe6, 0x17, 0xb1, 0x4c, 0x6a, 0xd3, 0x80, 0xae, 0x78, - 0xba, 0x41, 0x37, 0xa8, 0x67, 0xf1, 0x42, 0x19, 0x6c, 0x7f, 0xf0, 0x13, 0x48, 0x5e, 0xc4, 0x5a, - 0x1a, 0x02, 0x83, 0x43, 0x47, 0x93, 0x55, 0x98, 0x30, 0xa9, 0x6f, 0x79, 0xd4, 0xdc, 0x88, 0x19, - 0x2a, 0x4f, 0x29, 0x51, 0xb3, 0x14, 0xeb, 0xbb, 0x77, 0xd0, 0x98, 0x54, 0x0e, 0x4a, 0x61, 0xb1, - 0x24, 0x86, 0xb2, 0x2d, 0xdf, 0xd3, 0xfb, 0x7e, 0xd6, 0x1c, 0x63, 0x5b, 0x7e, 0x23, 0x1b, 0x04, - 0x87, 0x8d, 0xd5, 0x2a, 0x50, 0x5a, 0x73, 0xdb, 0xda, 0xdb, 0x25, 0x08, 0x2b, 0xaa, 0x90, 0x9f, - 0x2f, 0x40, 0x5d, 0x77, 0x1c, 0x37, 0x90, 0xd5, 0x4a, 0x44, 0x04, 0x1e, 0x73, 0x17, 0x6e, 0x99, - 0x5d, 0x88, 0x90, 0x8a, 0xe0, 0x6d, 0x18, 0x50, 0x8e, 0xf5, 0x60, 0x9c, 0x36, 0xe9, 0xa7, 0xe2, - 0xc9, 0xeb, 
0xf9, 0x67, 0x71, 0x84, 0xe8, 0xf1, 0xcc, 0x47, 0xe1, 0x6c, 0x7a, 0xb2, 0xc7, 0x09, - 0x07, 0xe5, 0x89, 0x24, 0x7d, 0xa9, 0x06, 0xf5, 0x9b, 0x7a, 0x60, 0xed, 0x51, 0x6e, 0xf4, 0x9f, - 0x8e, 0xe5, 0xf5, 0x6b, 0x05, 0xb8, 0x90, 0x8c, 0xec, 0x9e, 0xa2, 0xf9, 0xc5, 0x2f, 0xe7, 0x61, - 0x26, 0x35, 0x1c, 0x32, 0x0b, 0x6e, 0x88, 0x0d, 0x04, 0x8a, 0x4f, 0xdb, 0x10, 0x6b, 0x0d, 0x23, - 0x88, 0xc3, 0xe7, 0xf2, 0xd3, 0x62, 0x88, 0x3d, 0xdc, 0xc5, 0x13, 0x52, 0x66, 0xe2, 0xf8, 0x43, - 0x63, 0x26, 0x56, 0x1f, 0x0a, 0x0d, 0xb4, 0x17, 0x33, 0x13, 0x6b, 0x39, 0xc3, 0x15, 0x32, 0x19, - 0x4a, 0x60, 0x1b, 0x66, 0x6e, 0xf2, 0xcb, 0x23, 0x4a, 0x7d, 0x27, 0x06, 0x54, 0xb6, 0x75, 0xdf, - 0x32, 0xa4, 0x3e, 0x9e, 0xa3, 0x58, 0x8c, 0xba, 0x55, 0x2f, 0xbc, 0x9a, 0xfc, 0x11, 0x05, 0xee, - 0xe8, 0xf6, 0x7e, 0x31, 0xd7, 0xed, 0x7d, 0xb2, 0x08, 0x65, 0x87, 0x1d, 0xb6, 0xa5, 0x63, 0xdf, - 0xd7, 0xbf, 0x79, 0x83, 0xee, 0x23, 0x1f, 0xcc, 0x74, 0x5a, 0x60, 0xaf, 0x7f, 0x34, 0x83, 0xed, - 0x3d, 0x30, 0xee, 0xf7, 0x79, 0x7c, 0x40, 0x4a, 0xf8, 0x28, 0xc6, 0x23, 0x9a, 0x51, 0xf5, 0x33, - 0xed, 0xed, 0xd3, 0x7d, 0xda, 0x57, 0xde, 0xc7, 0x50, 0x7b, 0xfb, 0x18, 0x6b, 0x44, 0xd1, 0x77, - 0x7a, 0xca, 0x97, 0x32, 0xec, 0x2a, 0xa7, 0x65, 0xd8, 0xd5, 0x60, 0xfc, 0xa6, 0xcb, 0x43, 0xc6, - 0xda, 0x3f, 0x15, 0x01, 0xa2, 0x90, 0x1c, 0xf9, 0x6a, 0x01, 0x1e, 0x0b, 0x37, 0x5c, 0x20, 0xae, - 0xed, 0x2e, 0xda, 0xba, 0xd5, 0xcd, 0x6d, 0xe4, 0x65, 0x6d, 0x76, 0x7e, 0x02, 0x6d, 0x64, 0x91, - 0xc3, 0xec, 0x59, 0x10, 0x84, 0x2a, 0xed, 0xf6, 0x82, 0xfd, 0x25, 0xcb, 0x93, 0x2b, 0x30, 0x33, - 0xf2, 0x7b, 0x4d, 0xc2, 0x88, 0xa1, 0xf2, 0x8a, 0x26, 0xdf, 0x44, 0xaa, 0x07, 0x43, 0x3c, 0x64, - 0x17, 0xaa, 0x8e, 0xfb, 0xba, 0xcf, 0xd8, 0x21, 0x97, 0xe3, 0x4b, 0xa3, 0xb3, 0x5c, 0xb0, 0x55, - 0x18, 0x05, 0xf2, 0x01, 0xc7, 0x1d, 0xc9, 0xec, 0xaf, 0x14, 0xe1, 0x7c, 0x06, 0x1f, 0xc8, 0x4b, - 0x70, 0x56, 0x46, 0x3f, 0xa3, 0x12, 0x62, 0x85, 0xa8, 0x84, 0x58, 0x2b, 0xd5, 0x87, 0x03, 0xd0, - 0xe4, 0x75, 0x00, 0xdd, 0x30, 0xa8, 0xef, 0xaf, 
0xbb, 0xa6, 0x52, 0x60, 0x5f, 0x3c, 0x3c, 0x68, - 0xc0, 0x42, 0xd8, 0x7a, 0xef, 0xa0, 0xf1, 0xfe, 0xac, 0x84, 0x86, 0x14, 0x9f, 0xa3, 0x01, 0x18, - 0x43, 0x49, 0x3e, 0x05, 0x20, 0xae, 0x6d, 0x87, 0x17, 0x5d, 0x1e, 0x10, 0x30, 0x9a, 0x55, 0x57, - 0x8a, 0x67, 0x3f, 0xd6, 0xd7, 0x9d, 0xc0, 0x0a, 0xf6, 0xc5, 0xbd, 0xc2, 0xdb, 0x21, 0x16, 0x8c, - 0x61, 0xd4, 0xfe, 0xb8, 0x08, 0x55, 0xa5, 0x58, 0xbf, 0x03, 0x21, 0xc1, 0x76, 0x22, 0x24, 0x78, - 0x42, 0x29, 0x0c, 0x59, 0x41, 0x40, 0x37, 0x15, 0x04, 0x5c, 0xc9, 0x4f, 0xea, 0xfe, 0x61, 0xbf, - 0x6f, 0x14, 0x61, 0x4a, 0x81, 0xca, 0xf2, 0x0e, 0xcf, 0xc3, 0xa4, 0x47, 0x75, 0xb3, 0xa9, 0x07, - 0xc6, 0x2e, 0xff, 0x7c, 0x05, 0x7e, 0xb1, 0xe8, 0xdc, 0xe1, 0x41, 0x63, 0x12, 0xe3, 0x1d, 0x98, - 0x84, 0x23, 0x1f, 0x81, 0x33, 0xc2, 0xf5, 0xb8, 0xae, 0xdf, 0x15, 0x57, 0x2c, 0x39, 0xc3, 0xca, - 0x22, 0x6b, 0xa0, 0x99, 0xec, 0xc2, 0x34, 0x2c, 0x5b, 0xd6, 0xa2, 0x69, 0xcb, 0xd7, 0xdb, 0x62, - 0x32, 0x9c, 0x0b, 0x93, 0x62, 0x59, 0x37, 0x53, 0x7d, 0x38, 0x00, 0x4d, 0x74, 0xa8, 0xb3, 0x19, - 0x6d, 0x5a, 0x5d, 0xea, 0xf6, 0x55, 0xd5, 0xc4, 0x91, 0x22, 0xd3, 0x18, 0xa1, 0xc1, 0x38, 0x4e, - 0xed, 0xaf, 0x0a, 0x30, 0x11, 0xf1, 0xeb, 0xd4, 0x03, 0xa3, 0x3b, 0xc9, 0xc0, 0xe8, 0x42, 0xee, - 0xe5, 0x30, 0x24, 0x14, 0xfa, 0x4b, 0xe3, 0x90, 0xc8, 0xa4, 0x21, 0xdb, 0x30, 0x63, 0x65, 0xc6, - 0x03, 0x63, 0xa7, 0x4d, 0x98, 0x8f, 0xbf, 0x3a, 0x14, 0x12, 0xef, 0x83, 0x85, 0xf4, 0xa1, 0xba, - 0x47, 0xbd, 0xc0, 0x32, 0xa8, 0x7a, 0xbf, 0x95, 0xdc, 0x2a, 0x99, 0x48, 0x13, 0x8b, 0x78, 0x7a, - 0x5b, 0x12, 0xc0, 0x90, 0x14, 0xd9, 0x86, 0x0a, 0x35, 0xdb, 0x54, 0x5d, 0x7a, 0xcd, 0x59, 0x52, - 0x26, 0xe4, 0x27, 0x7b, 0xf2, 0x51, 0xa0, 0x26, 0x3e, 0xd4, 0x6c, 0xe5, 0x8a, 0x90, 0xeb, 0x70, - 0x74, 0x05, 0x2b, 0x74, 0x6a, 0x44, 0xf7, 0x61, 0xc2, 0x26, 0x8c, 0xe8, 0x90, 0x4e, 0x58, 0xc7, - 0xab, 0x72, 0x42, 0x87, 0xc7, 0x7d, 0x2a, 0x79, 0xf9, 0x50, 0xbb, 0xa3, 0x07, 0xd4, 0xeb, 0xea, - 0x5e, 0x47, 0x5a, 0x1b, 0xa3, 0xbf, 0xe1, 0x2b, 0x0a, 0x53, 0xf4, 0x86, 0x61, 0x13, 
0x46, 0x74, - 0x88, 0x0b, 0xb5, 0x40, 0xaa, 0xcf, 0xaa, 0xfa, 0xc7, 0xe8, 0x44, 0x95, 0x22, 0xee, 0xcb, 0x8c, - 0x1a, 0xf5, 0x88, 0x11, 0x0d, 0xb2, 0x97, 0x28, 0xb7, 0x25, 0x8a, 0xac, 0x35, 0x73, 0xd4, 0xfa, - 0x93, 0xa8, 0x22, 0x71, 0x93, 0x5d, 0xb6, 0x4b, 0xfb, 0x66, 0x25, 0x3a, 0x96, 0xdf, 0xe9, 0x08, - 0xfc, 0xb3, 0xc9, 0x08, 0xfc, 0xe5, 0x74, 0x04, 0x3e, 0xe5, 0xd1, 0x3a, 0x7e, 0x0c, 0x5e, 0x87, - 0xba, 0xad, 0xfb, 0xc1, 0x56, 0xcf, 0xd4, 0x03, 0x19, 0x72, 0xa9, 0xcf, 0xff, 0xaf, 0xa3, 0x9d, - 0x9a, 0xec, 0x1c, 0x8e, 0x3c, 0x4c, 0x6b, 0x11, 0x1a, 0x8c, 0xe3, 0x24, 0xcf, 0x40, 0x7d, 0x8f, - 0x9f, 0x04, 0xe2, 0x06, 0x6d, 0x85, 0x8b, 0x11, 0x7e, 0xb2, 0xdf, 0x8e, 0x9a, 0x31, 0x0e, 0xc3, - 0x86, 0x08, 0x0d, 0x24, 0x2a, 0x41, 0x24, 0x87, 0xb4, 0xa2, 0x66, 0x8c, 0xc3, 0xf0, 0xf0, 0x9d, - 0xe5, 0x74, 0xc4, 0x80, 0x71, 0x3e, 0x40, 0x84, 0xef, 0x54, 0x23, 0x46, 0xfd, 0xe4, 0x2a, 0x54, - 0xfb, 0xe6, 0x8e, 0x80, 0xad, 0x72, 0x58, 0xae, 0x61, 0x6e, 0x2d, 0x2d, 0xcb, 0x1b, 0xbd, 0xaa, - 0x97, 0xcd, 0xa4, 0xab, 0xf7, 0x54, 0x07, 0xb7, 0x0d, 0xe5, 0x4c, 0xd6, 0xa3, 0x66, 0x8c, 0xc3, - 0x90, 0x0f, 0xc1, 0x94, 0x47, 0xcd, 0xbe, 0x41, 0xc3, 0x51, 0xc0, 0x47, 0xf1, 0xcb, 0xde, 0x98, - 0xe8, 0xc1, 0x14, 0xe4, 0x90, 0x64, 0x83, 0xfa, 0x48, 0xc9, 0x06, 0xdf, 0x2b, 0x00, 0x19, 0x4c, - 0x77, 0x21, 0xbb, 0x30, 0xe6, 0x70, 0xef, 0x57, 0xee, 0xa2, 0x65, 0x31, 0x27, 0x9a, 0x38, 0x96, - 0x64, 0x83, 0xc4, 0x4f, 0x1c, 0xa8, 0xd2, 0xbb, 0x01, 0xf5, 0x9c, 0x30, 0xfd, 0xed, 0x64, 0x0a, - 0xa4, 0x09, 0x6b, 0x40, 0x62, 0xc6, 0x90, 0x86, 0xf6, 0x83, 0x22, 0xd4, 0x63, 0x70, 0x0f, 0x32, - 0x2a, 0xf9, 0x0d, 0x1c, 0xe1, 0x74, 0xda, 0xf2, 0x6c, 0xb9, 0xc3, 0x62, 0x37, 0x70, 0x64, 0x17, - 0xae, 0x61, 0x1c, 0x8e, 0xcc, 0x03, 0x74, 0x75, 0x3f, 0xa0, 0x1e, 0x97, 0xbe, 0xa9, 0x7b, 0x2f, - 0xeb, 0x61, 0x0f, 0xc6, 0xa0, 0xc8, 0x15, 0x59, 0xe2, 0xae, 0x9c, 0x2c, 0x0e, 0x31, 0xa4, 0x7e, - 0x5d, 0xe5, 0x04, 0xea, 0xd7, 0x91, 0x36, 0x9c, 0x55, 0xb3, 0x56, 0xbd, 0xc7, 0x2b, 0x1d, 0x20, - 0xec, 0x97, 0x14, 0x0a, 
0x1c, 0x40, 0xaa, 0x7d, 0xb3, 0x00, 0x93, 0x09, 0x97, 0x87, 0x28, 0xeb, - 0xa0, 0x92, 0xb5, 0x12, 0x65, 0x1d, 0x62, 0x39, 0x56, 0x4f, 0xc3, 0x98, 0x60, 0x50, 0x3a, 0x06, - 0x2b, 0x58, 0x88, 0xb2, 0x97, 0x9d, 0x65, 0xd2, 0xa9, 0x9a, 0x3e, 0xcb, 0xa4, 0xd7, 0x15, 0x55, - 0x3f, 0x79, 0x1f, 0x54, 0xd5, 0xec, 0x24, 0xa7, 0xa3, 0x6a, 0x8f, 0xb2, 0x1d, 0x43, 0x08, 0xed, - 0xdf, 0x4b, 0xc0, 0x23, 0x16, 0xe4, 0x79, 0xa8, 0x75, 0xa9, 0xb1, 0xab, 0x3b, 0x96, 0xaf, 0xca, - 0xba, 0x30, 0xeb, 0xb6, 0xb6, 0xae, 0x1a, 0xef, 0x31, 0x04, 0x0b, 0xad, 0x35, 0x9e, 0x14, 0x14, - 0xc1, 0x12, 0x03, 0xc6, 0xda, 0xbe, 0xaf, 0xf7, 0xac, 0xdc, 0xb5, 0x6d, 0x45, 0x19, 0x0d, 0xb1, - 0x89, 0xc4, 0x6f, 0x94, 0xa8, 0x89, 0x01, 0x95, 0x9e, 0xad, 0x5b, 0x4e, 0xee, 0x3a, 0xc2, 0xec, - 0x0d, 0x36, 0x18, 0x26, 0xe1, 0xd2, 0xe1, 0x3f, 0x51, 0xe0, 0x26, 0x7d, 0xa8, 0xfb, 0x86, 0xa7, - 0x77, 0xfd, 0x5d, 0x7d, 0xfe, 0xb9, 0x0f, 0xe4, 0x56, 0x92, 0x22, 0x52, 0xe2, 0xcc, 0x5e, 0xc4, - 0x85, 0xf5, 0xd6, 0xf5, 0x85, 0xf9, 0xe7, 0x3e, 0x80, 0x71, 0x3a, 0x71, 0xb2, 0xcf, 0x3d, 0x33, - 0x2f, 0xd7, 0xfd, 0x89, 0x93, 0x7d, 0xee, 0x99, 0x79, 0x8c, 0xd3, 0xd1, 0xfe, 0xad, 0x00, 0xb5, - 0x10, 0x96, 0x6c, 0x01, 0xb0, 0x1d, 0x28, 0x0b, 0x5f, 0x1c, 0xab, 0x08, 0x25, 0xb7, 0x8a, 0xb7, - 0xc2, 0xc1, 0x18, 0x43, 0x94, 0x51, 0x19, 0xa4, 0x78, 0xd2, 0x95, 0x41, 0xe6, 0xa0, 0xb6, 0xab, - 0x3b, 0xa6, 0xbf, 0xab, 0x77, 0xc4, 0x41, 0x14, 0xab, 0x95, 0x73, 0x5d, 0x75, 0x60, 0x04, 0xa3, - 0xfd, 0x73, 0x05, 0x44, 0x75, 0x56, 0xb6, 0x55, 0x4c, 0xcb, 0x17, 0x69, 0x16, 0x05, 0x3e, 0x32, - 0xdc, 0x2a, 0x4b, 0xb2, 0x1d, 0x43, 0x08, 0x72, 0x11, 0x4a, 0x5d, 0xcb, 0x91, 0x11, 0x0f, 0xee, - 0xf0, 0x5a, 0xb7, 0x1c, 0x64, 0x6d, 0xbc, 0x4b, 0xbf, 0x2b, 0x23, 0x64, 0xa2, 0x4b, 0xbf, 0x8b, - 0xac, 0x8d, 0x99, 0xa0, 0xb6, 0xeb, 0x76, 0xb6, 0x75, 0xa3, 0xa3, 0x02, 0x69, 0x65, 0x2e, 0x08, - 0xb9, 0x09, 0xba, 0x96, 0xec, 0xc2, 0x34, 0x2c, 0x59, 0x81, 0x33, 0x86, 0xeb, 0xda, 0xa6, 0x7b, - 0xc7, 0x51, 0xc3, 0x85, 0xea, 0xc0, 0x23, 0x09, 0x4b, 0xb4, 
0xe7, 0x51, 0x83, 0xe9, 0x17, 0x8b, - 0x49, 0x20, 0x4c, 0x8f, 0x22, 0x5b, 0xf0, 0xf8, 0x9b, 0xd4, 0x73, 0xe5, 0x71, 0xd1, 0xb2, 0x29, - 0xed, 0x29, 0x84, 0x42, 0xb1, 0xe0, 0x81, 0xbd, 0x4f, 0x64, 0x83, 0xe0, 0xb0, 0xb1, 0x3c, 0x45, - 0x40, 0xf7, 0xda, 0x34, 0xd8, 0xf0, 0x5c, 0x83, 0xfa, 0xbe, 0xe5, 0xb4, 0x15, 0xda, 0xf1, 0x08, - 0xed, 0x66, 0x36, 0x08, 0x0e, 0x1b, 0x4b, 0x5e, 0x85, 0x69, 0xd1, 0x25, 0xa4, 0xf6, 0xc2, 0x9e, - 0x6e, 0xd9, 0xfa, 0xb6, 0x65, 0xab, 0xba, 0xf9, 0x93, 0x22, 0x40, 0xb1, 0x39, 0x04, 0x06, 0x87, - 0x8e, 0xe6, 0xd5, 0xee, 0x65, 0x78, 0x6a, 0x83, 0x7a, 0x7c, 0x1d, 0x48, 0x7d, 0x46, 0x54, 0xbb, - 0x4f, 0xf5, 0xe1, 0x00, 0x34, 0x41, 0xb8, 0xc0, 0xab, 0xfa, 0x6e, 0xf5, 0x52, 0x4c, 0x97, 0x1a, - 0x0e, 0x8f, 0x43, 0xb5, 0x32, 0x21, 0x70, 0xc8, 0x48, 0xf6, 0xbe, 0xbc, 0x67, 0xc9, 0xbd, 0xe3, - 0xa4, 0xb1, 0xd6, 0xa3, 0xf7, 0x6d, 0x0d, 0x81, 0xc1, 0xa1, 0xa3, 0xb5, 0x3f, 0x2a, 0xc2, 0x64, - 0xe2, 0xa2, 0xcc, 0x43, 0x77, 0x21, 0x81, 0xa9, 0x8a, 0x5d, 0xbf, 0xbd, 0xba, 0x74, 0x9d, 0xea, - 0x26, 0xf5, 0x6e, 0x50, 0x75, 0xa9, 0x89, 0xef, 0xfe, 0xf5, 0x44, 0x0f, 0xa6, 0x20, 0xc9, 0x0e, - 0x54, 0x84, 0xe3, 0x33, 0x6f, 0xdd, 0x51, 0xc5, 0x23, 0xee, 0xfd, 0xe4, 0xb2, 0x41, 0xf8, 0x3e, - 0x05, 0x7a, 0x2d, 0x80, 0x89, 0x38, 0x04, 0xdb, 0xf1, 0x91, 0x56, 0x35, 0x9e, 0xd0, 0xa8, 0x56, - 0xa1, 0x14, 0x04, 0xa3, 0x5e, 0x75, 0x10, 0x8e, 0xf4, 0xcd, 0x35, 0x64, 0x38, 0xb4, 0x1d, 0xf6, - 0xed, 0x7c, 0xdf, 0x72, 0x1d, 0x59, 0x6c, 0x6c, 0x0b, 0xc6, 0x03, 0xe9, 0x4b, 0x1a, 0xed, 0xaa, - 0x06, 0xf7, 0xeb, 0x2a, 0x3f, 0x92, 0xc2, 0xa5, 0xfd, 0x75, 0x11, 0x6a, 0xa1, 0xdd, 0x77, 0x84, - 0x22, 0x5e, 0x2e, 0xd4, 0xc2, 0x6c, 0xa1, 0xdc, 0xff, 0x21, 0x10, 0x95, 0xa5, 0xe6, 0xa6, 0x4a, - 0xf8, 0x88, 0x11, 0x8d, 0x78, 0x5d, 0xf1, 0x52, 0x8e, 0xba, 0xe2, 0x3d, 0x18, 0x0f, 0x3c, 0xab, - 0xdd, 0x96, 0x4a, 0x68, 0x7d, 0x7e, 0x35, 0xbf, 0xe5, 0xbc, 0x29, 0x10, 0x4a, 0xce, 0x8a, 0x07, - 0x54, 0x64, 0xb4, 0x37, 0xe0, 0x6c, 0x1a, 0x92, 0x6b, 0x68, 0xc6, 0x2e, 0x35, 0xfb, 0xb6, 0xe2, - 
0x71, 0xa4, 0xa1, 0xc9, 0x76, 0x0c, 0x21, 0x98, 0x95, 0xc6, 0x3e, 0xd3, 0x9b, 0xae, 0xa3, 0xec, - 0x5f, 0xae, 0xec, 0x6e, 0xca, 0x36, 0x0c, 0x7b, 0xb5, 0x7f, 0x2c, 0xc1, 0xc5, 0xc8, 0x7a, 0x5f, - 0xd7, 0x1d, 0xbd, 0x7d, 0x84, 0xc2, 0xf1, 0x3f, 0x4b, 0x7e, 0x3b, 0x6e, 0x25, 0xc6, 0xd2, 0x43, - 0x50, 0x89, 0xf1, 0x87, 0x05, 0xe0, 0x7f, 0xcf, 0x40, 0x3e, 0x07, 0x13, 0x7a, 0xec, 0x3f, 0x43, - 0xe4, 0xe7, 0xbc, 0x96, 0xfb, 0x73, 0xf2, 0x7f, 0x81, 0x08, 0xb3, 0x55, 0xe3, 0xad, 0x98, 0x20, - 0x48, 0x5c, 0xa8, 0xee, 0xe8, 0xb6, 0xcd, 0x94, 0x96, 0xdc, 0xd1, 0x88, 0x04, 0x71, 0xbe, 0xcc, - 0x97, 0x25, 0x6a, 0x0c, 0x89, 0x68, 0xff, 0x50, 0x80, 0xc9, 0x96, 0x6d, 0x99, 0x96, 0xd3, 0x3e, - 0xc5, 0x12, 0x8c, 0xb7, 0xa0, 0xe2, 0xdb, 0x96, 0x49, 0x47, 0x3c, 0xc7, 0x85, 0x04, 0x61, 0x08, - 0x50, 0xe0, 0x49, 0xd6, 0x74, 0x2c, 0x1d, 0xa1, 0xa6, 0xe3, 0x8f, 0xc6, 0x40, 0xfe, 0xc5, 0x07, - 0xe9, 0x43, 0xad, 0xad, 0x4a, 0xc5, 0xc9, 0x77, 0xbc, 0x9e, 0xa3, 0xe2, 0x45, 0xa2, 0xe8, 0x9c, - 0x38, 0x75, 0xc3, 0x46, 0x8c, 0x28, 0x11, 0x9a, 0xfc, 0x9b, 0x98, 0xa5, 0x9c, 0x7f, 0x13, 0x23, - 0xc8, 0x0d, 0xfe, 0x51, 0x8c, 0x0e, 0xe5, 0xdd, 0x20, 0xe8, 0xc9, 0x7d, 0x35, 0xfa, 0xcd, 0xcc, - 0xe8, 0xd2, 0xa5, 0xd0, 0x46, 0xd8, 0x33, 0x72, 0xd4, 0x8c, 0x84, 0xa3, 0x87, 0xd5, 0xc9, 0x17, - 0x73, 0x45, 0xa4, 0xe3, 0x24, 0xd8, 0x33, 0x72, 0xd4, 0xe4, 0x33, 0x50, 0x0f, 0x3c, 0xdd, 0xf1, - 0x77, 0x5c, 0xaf, 0x4b, 0x3d, 0x69, 0xc6, 0x2d, 0xe7, 0xf8, 0xa7, 0x94, 0xcd, 0x08, 0x9b, 0x08, - 0x75, 0x25, 0x9a, 0x30, 0x4e, 0x8d, 0x74, 0xa0, 0xda, 0x37, 0xc5, 0xc4, 0xa4, 0x7f, 0x63, 0x21, - 0xcf, 0x9f, 0xdf, 0xc4, 0xe2, 0xcd, 0xea, 0x09, 0x43, 0x02, 0xc9, 0x42, 0xfc, 0xe3, 0x27, 0x55, - 0x88, 0x3f, 0xbe, 0x1a, 0xb3, 0x6e, 0x84, 0x91, 0xae, 0xd4, 0x28, 0x9d, 0xb6, 0x4c, 0x97, 0x59, - 0xce, 0xad, 0xec, 0x09, 0x92, 0xf5, 0x50, 0x2b, 0x75, 0xda, 0xa8, 0x68, 0x68, 0x5d, 0x90, 0x6e, - 0x68, 0x62, 0x24, 0xca, 0xd5, 0x8a, 0x34, 0xc5, 0xb9, 0xa3, 0x9d, 0x07, 0x61, 0xdd, 0xd4, 0x58, - 0xb9, 0xac, 0xcc, 0xba, 0xb4, 0xda, 
0xdf, 0x14, 0xa1, 0xb4, 0xb9, 0xd6, 0x12, 0xd5, 0x58, 0x78, - 0x2d, 0x68, 0xda, 0xea, 0x58, 0xbd, 0xdb, 0xd4, 0xb3, 0x76, 0xf6, 0xa5, 0x75, 0x1a, 0xab, 0xc6, - 0x92, 0x86, 0xc0, 0x8c, 0x51, 0xe4, 0x35, 0x98, 0x30, 0xf4, 0x45, 0xea, 0x05, 0xa3, 0xd8, 0xde, - 0x3c, 0x1f, 0x7b, 0x71, 0x21, 0x1a, 0x8e, 0x09, 0x64, 0x64, 0x0b, 0xc0, 0x88, 0x50, 0x97, 0x8e, - 0xed, 0x31, 0x88, 0x21, 0x8e, 0x21, 0x22, 0x08, 0xb5, 0x0e, 0x03, 0xe5, 0x58, 0xcb, 0xc7, 0xc1, - 0xca, 0x57, 0xce, 0x0d, 0x35, 0x16, 0x23, 0x34, 0x9a, 0x03, 0x93, 0x89, 0x1a, 0xb6, 0xe4, 0x83, - 0x50, 0x75, 0x7b, 0xb1, 0xe3, 0xb4, 0xc6, 0xcd, 0xe9, 0xea, 0x2d, 0xd9, 0x76, 0xef, 0xa0, 0x31, - 0xb9, 0xe6, 0xb6, 0x2d, 0x43, 0x35, 0x60, 0x08, 0x4e, 0x34, 0x18, 0xe3, 0x49, 0x94, 0xaa, 0x82, - 0x2d, 0x97, 0x1d, 0xbc, 0xc8, 0xa4, 0x8f, 0xb2, 0x47, 0xfb, 0x7c, 0x19, 0xa2, 0xe0, 0x0d, 0xf1, - 0x61, 0xcc, 0xe4, 0x85, 0x26, 0xe5, 0xc9, 0x3d, 0x7a, 0x10, 0x2c, 0x59, 0x85, 0x5b, 0xd8, 0x47, - 0xc9, 0x36, 0x94, 0xa4, 0x48, 0x1b, 0x4a, 0x6f, 0xb8, 0xdb, 0xb9, 0x0f, 0xee, 0xd8, 0xed, 0x09, - 0xe1, 0x4e, 0x8a, 0x35, 0x20, 0xa3, 0x40, 0x7e, 0xbd, 0x00, 0xe7, 0xfc, 0xb4, 0xd2, 0x29, 0x97, - 0x03, 0xe6, 0xd7, 0xae, 0xd3, 0x6a, 0xac, 0xcc, 0xa0, 0x1c, 0xd6, 0x8d, 0x83, 0x73, 0x61, 0xfc, - 0x17, 0x51, 0x15, 0xb9, 0x9c, 0x56, 0x72, 0xfe, 0xef, 0x42, 0x92, 0xff, 0xc9, 0x36, 0x94, 0xa4, - 0xb4, 0x2f, 0x16, 0xa1, 0x1e, 0x3b, 0xad, 0x73, 0x17, 0x46, 0xbe, 0x9b, 0x2a, 0x8c, 0xbc, 0x31, - 0x7a, 0x90, 0x31, 0x9a, 0xd5, 0x69, 0xd7, 0x46, 0xfe, 0x93, 0x22, 0x94, 0xb6, 0x96, 0x96, 0x93, - 0xe6, 0x62, 0xe1, 0x1d, 0x30, 0x17, 0x77, 0x61, 0x7c, 0xbb, 0x6f, 0xd9, 0x81, 0xe5, 0xe4, 0xbe, - 0xdf, 0xa5, 0xea, 0x48, 0xcb, 0x6b, 0x12, 0x02, 0x2b, 0x2a, 0xf4, 0xa4, 0x0d, 0xe3, 0x6d, 0x51, - 0x60, 0x23, 0x77, 0xea, 0x95, 0x2c, 0xd4, 0x21, 0x08, 0xc9, 0x07, 0x54, 0xd8, 0xb5, 0xcf, 0x82, - 0xfc, 0x27, 0x3a, 0xe2, 0x9f, 0x0e, 0x37, 0x43, 0x65, 0x34, 0x8b, 0xa3, 0xda, 0x67, 0x20, 0xd4, - 0x04, 0xde, 0xf1, 0xcf, 0xa9, 0xfd, 0x4b, 0x01, 0x92, 0xca, 0xcf, 0x3b, 
0xbf, 0xa2, 0x3a, 0xe9, - 0x15, 0xb5, 0x74, 0x12, 0x1b, 0x30, 0x7b, 0x51, 0x69, 0x7f, 0x50, 0x84, 0x31, 0xf9, 0xdf, 0x82, - 0xa7, 0x9f, 0x49, 0x46, 0x13, 0x99, 0x64, 0x8b, 0x39, 0x0f, 0xc7, 0xa1, 0x79, 0x64, 0xdd, 0x54, - 0x1e, 0x59, 0xde, 0x7f, 0xbf, 0x79, 0x40, 0x16, 0xd9, 0x5f, 0x14, 0x40, 0x1e, 0xcd, 0xab, 0x8e, - 0x1f, 0xe8, 0x8e, 0xc1, 0xff, 0x84, 0x51, 0xca, 0x81, 0xbc, 0xe9, 0x0a, 0x32, 0xa5, 0x47, 0x88, - 0x7e, 0xfe, 0x5b, 0x9d, 0xfb, 0xe4, 0x7d, 0x50, 0xdd, 0x75, 0xfd, 0x80, 0x9f, 0xf5, 0xc5, 0xa4, - 0x6f, 0xe7, 0xba, 0x6c, 0xc7, 0x10, 0x22, 0x1d, 0xd6, 0xab, 0x0c, 0x0f, 0xeb, 0x69, 0x5f, 0x2f, - 0xc2, 0x44, 0xe2, 0x3f, 0x8f, 0x46, 0x4e, 0x8a, 0x4b, 0xe5, 0xa4, 0x15, 0x4f, 0x3e, 0x27, 0x2d, - 0x2b, 0xef, 0xae, 0x94, 0x33, 0xef, 0xae, 0x7c, 0x9c, 0xbc, 0x3b, 0xed, 0xdb, 0x05, 0x00, 0xc5, - 0xad, 0x53, 0x4f, 0x89, 0x33, 0x93, 0x29, 0x71, 0xb9, 0xd7, 0x55, 0x76, 0x42, 0xdc, 0xef, 0x55, - 0xd4, 0x2b, 0xf1, 0x74, 0xb8, 0xb7, 0x0a, 0x30, 0xa5, 0x27, 0x52, 0xcc, 0x72, 0xab, 0x97, 0xa9, - 0x8c, 0xb5, 0xf0, 0xdf, 0x07, 0x93, 0xed, 0x98, 0x22, 0x4b, 0x5e, 0x88, 0xaa, 0x6a, 0xdd, 0x8c, - 0x96, 0xfd, 0x40, 0x39, 0x2c, 0xae, 0xea, 0x24, 0x20, 0x1f, 0x90, 0xd2, 0x57, 0x3a, 0x91, 0x94, - 0xbe, 0xf8, 0x65, 0xa5, 0xf2, 0x7d, 0x2f, 0x2b, 0xed, 0x41, 0x6d, 0xc7, 0x73, 0xbb, 0x3c, 0x6b, - 0x4e, 0xfe, 0x6f, 0xce, 0xb5, 0x1c, 0x32, 0x25, 0xfa, 0xc7, 0xb8, 0x48, 0xb4, 0x2e, 0x2b, 0xfc, - 0x18, 0x91, 0xe2, 0x4e, 0x69, 0x57, 0x50, 0x1d, 0x3b, 0x49, 0xaa, 0xe1, 0x59, 0xb2, 0x29, 0xb0, - 0xa3, 0x22, 0x93, 0xcc, 0x94, 0x1b, 0x7f, 0x67, 0x32, 0xe5, 0xb4, 0x5f, 0x2c, 0xab, 0x03, 0xec, - 0xa1, 0x2b, 0xe0, 0x22, 0x2b, 0xf9, 0xc5, 0x93, 0xc7, 0xde, 0x17, 0x5b, 0x36, 0x22, 0xe5, 0x37, - 0x3c, 0x01, 0x32, 0x96, 0x0e, 0xcf, 0xb9, 0x90, 0xff, 0x06, 0x3f, 0x90, 0x73, 0x21, 0xff, 0xa9, - 0x3d, 0x84, 0x20, 0x4f, 0xc3, 0x98, 0x47, 0x75, 0xdf, 0x75, 0xd2, 0x77, 0x53, 0x91, 0xb7, 0xa2, - 0xec, 0x8d, 0x27, 0xb0, 0x8d, 0x3d, 0x20, 0x81, 0xcd, 0x84, 0x09, 0x5b, 0xf7, 0x03, 0x1e, 0x1f, - 0x34, 0x17, 
0xd4, 0x3f, 0x57, 0x1c, 0x27, 0x83, 0x2d, 0xdc, 0xaf, 0x6b, 0x31, 0x3c, 0x98, 0xc0, - 0x3a, 0x24, 0x2f, 0xab, 0x3a, 0x52, 0x5e, 0xd6, 0x41, 0x09, 0x52, 0xb6, 0xd3, 0xcf, 0x22, 0x14, - 0xff, 0xa5, 0x22, 0x14, 0x6f, 0x17, 0x21, 0x3a, 0x08, 0x8e, 0x99, 0x6a, 0xf1, 0x2a, 0x54, 0xbb, - 0xfa, 0xdd, 0x25, 0x6a, 0xeb, 0xfb, 0x79, 0xfe, 0xec, 0x64, 0x5d, 0xe2, 0xc0, 0x10, 0x1b, 0xf1, - 0x01, 0xac, 0xb0, 0xf6, 0x5d, 0x6e, 0x8f, 0x73, 0x54, 0x46, 0x4f, 0xf8, 0xb4, 0xa2, 0x67, 0x8c, - 0x91, 0xd1, 0xfe, 0xbc, 0x08, 0xb2, 0x48, 0x22, 0xa1, 0x50, 0xd9, 0xb1, 0xee, 0x52, 0x33, 0x77, - 0xda, 0x61, 0xec, 0x2f, 0xa8, 0x84, 0x4b, 0x9d, 0x37, 0xa0, 0xc0, 0xce, 0x7d, 0xa5, 0x22, 0x44, - 0x22, 0xf9, 0x97, 0xc3, 0x57, 0x1a, 0x0f, 0xb5, 0x48, 0x5f, 0xa9, 0x68, 0x42, 0x45, 0x43, 0xb8, - 0x66, 0x79, 0x9c, 0x5a, 0xb2, 0x34, 0x8f, 0x6b, 0x36, 0x16, 0xef, 0x56, 0xae, 0x59, 0x5f, 0x5c, - 0x40, 0x96, 0x34, 0x9a, 0x9f, 0xfc, 0xd6, 0x77, 0x2f, 0x3f, 0xf2, 0xed, 0xef, 0x5e, 0x7e, 0xe4, - 0x3b, 0xdf, 0xbd, 0xfc, 0xc8, 0xe7, 0x0f, 0x2f, 0x17, 0xbe, 0x75, 0x78, 0xb9, 0xf0, 0xed, 0xc3, - 0xcb, 0x85, 0xef, 0x1c, 0x5e, 0x2e, 0xfc, 0xdd, 0xe1, 0xe5, 0xc2, 0xaf, 0xfc, 0xfd, 0xe5, 0x47, - 0x3e, 0xf1, 0x7c, 0x34, 0x85, 0x39, 0x35, 0x85, 0x39, 0x45, 0x70, 0xae, 0xd7, 0x69, 0xcf, 0xb1, - 0x29, 0x44, 0x2d, 0x6a, 0x0a, 0xff, 0x19, 0x00, 0x00, 0xff, 0xff, 0x2c, 0xe6, 0x2a, 0xfb, 0x10, - 0x86, 0x00, 0x00, + // 7423 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5d, 0x6c, 0x24, 0xc7, + 0x75, 0xae, 0xe6, 0x8f, 0x33, 0x73, 0x86, 0xe4, 0xee, 0xd6, 0x4a, 0x2b, 0xee, 0x6a, 0xb5, 0x5c, + 0xb7, 0xae, 0x74, 0xd7, 0xd7, 0x36, 0x79, 0xc5, 0xab, 0x3f, 0xfb, 0xda, 0x96, 0x38, 0xe4, 0x92, + 0x4b, 0x2d, 0xb9, 0x4b, 0x9f, 0x21, 0x57, 0xb2, 0x75, 0x6d, 0xdd, 0x66, 0x77, 0x71, 0xd8, 0x62, + 0x4f, 0xf7, 0xa8, 0xbb, 0x87, 0xbb, 0x94, 0xaf, 0xe1, 0xbf, 0x07, 0xe9, 0x22, 0x09, 0x12, 0xf8, + 0xc9, 0x40, 0xe0, 0x04, 0x09, 0x02, 0xf8, 0xc1, 0x70, 0x1e, 0x02, 0x28, 0x0f, 
0x01, 0xf2, 0x07, + 0x04, 0x89, 0x13, 0xe4, 0xc7, 0x0f, 0x01, 0xa2, 0x20, 0x00, 0x11, 0x33, 0xc8, 0x43, 0x12, 0xc4, + 0x30, 0x62, 0x20, 0xb6, 0x17, 0x06, 0x1c, 0xd4, 0x5f, 0xff, 0x4d, 0xcf, 0x2e, 0x39, 0x4d, 0xae, + 0x56, 0x89, 0xde, 0xba, 0xab, 0x4e, 0x7d, 0xa7, 0xfa, 0xd4, 0xcf, 0x39, 0x75, 0xea, 0x54, 0x35, + 0x2c, 0xb6, 0xad, 0x60, 0xab, 0xb7, 0x31, 0x65, 0xb8, 0x9d, 0x69, 0xa7, 0xd7, 0xd1, 0xbb, 0x9e, + 0xfb, 0x1a, 0x7f, 0xd8, 0xb4, 0xdd, 0x9b, 0xd3, 0xdd, 0xed, 0xf6, 0xb4, 0xde, 0xb5, 0xfc, 0x28, + 0x65, 0xe7, 0x49, 0xdd, 0xee, 0x6e, 0xe9, 0x4f, 0x4e, 0xb7, 0xa9, 0x43, 0x3d, 0x3d, 0xa0, 0xe6, + 0x54, 0xd7, 0x73, 0x03, 0x97, 0x3c, 0x1b, 0x01, 0x4d, 0x29, 0xa0, 0x29, 0x55, 0x6c, 0xaa, 0xbb, + 0xdd, 0x9e, 0x62, 0x40, 0x51, 0x8a, 0x02, 0x3a, 0xf7, 0x91, 0x58, 0x0d, 0xda, 0x6e, 0xdb, 0x9d, + 0xe6, 0x78, 0x1b, 0xbd, 0x4d, 0xfe, 0xc6, 0x5f, 0xf8, 0x93, 0xe0, 0x73, 0x4e, 0xdb, 0x7e, 0xce, + 0x9f, 0xb2, 0x5c, 0x56, 0xad, 0x69, 0xc3, 0xf5, 0xe8, 0xf4, 0x4e, 0x5f, 0x5d, 0xce, 0x3d, 0x15, + 0xd1, 0x74, 0x74, 0x63, 0xcb, 0x72, 0xa8, 0xb7, 0xab, 0xbe, 0x65, 0xda, 0xa3, 0xbe, 0xdb, 0xf3, + 0x0c, 0x7a, 0xa8, 0x52, 0xfe, 0x74, 0x87, 0x06, 0x7a, 0x16, 0xaf, 0xe9, 0x41, 0xa5, 0xbc, 0x9e, + 0x13, 0x58, 0x9d, 0x7e, 0x36, 0xcf, 0xdc, 0xad, 0x80, 0x6f, 0x6c, 0xd1, 0x8e, 0x9e, 0x2e, 0xa7, + 0xfd, 0x5d, 0x1d, 0x4e, 0xcf, 0x6e, 0xf8, 0x81, 0xa7, 0x1b, 0xc1, 0xaa, 0x6b, 0xae, 0xd1, 0x4e, + 0xd7, 0xd6, 0x03, 0x4a, 0xb6, 0xa1, 0xc6, 0xea, 0x66, 0xea, 0x81, 0x3e, 0x51, 0xb8, 0x58, 0xb8, + 0xd4, 0x98, 0x99, 0x9d, 0x1a, 0xb2, 0x2d, 0xa6, 0x56, 0x24, 0x50, 0x73, 0x74, 0x7f, 0x6f, 0xb2, + 0xa6, 0xde, 0x30, 0x64, 0x40, 0xbe, 0x5e, 0x80, 0x51, 0xc7, 0x35, 0x69, 0x8b, 0xda, 0xd4, 0x08, + 0x5c, 0x6f, 0xa2, 0x78, 0xb1, 0x74, 0xa9, 0x31, 0xf3, 0xb9, 0xa1, 0x39, 0x66, 0x7c, 0xd1, 0xd4, + 0xb5, 0x18, 0x83, 0xcb, 0x4e, 0xe0, 0xed, 0x36, 0x1f, 0xfc, 0xce, 0xde, 0xe4, 0x03, 0xfb, 0x7b, + 0x93, 0xa3, 0xf1, 0x2c, 0x4c, 0xd4, 0x84, 0xac, 0x43, 0x23, 0x70, 0x6d, 0x26, 0x32, 0xcb, 0x75, + 0xfc, 0x89, 0x12, 
0xaf, 0xd8, 0x85, 0x29, 0x21, 0x6d, 0xc6, 0x7e, 0x8a, 0x75, 0x97, 0xa9, 0x9d, + 0x27, 0xa7, 0xd6, 0x42, 0xb2, 0xe6, 0x69, 0x09, 0xdc, 0x88, 0xd2, 0x7c, 0x8c, 0xe3, 0x10, 0x0a, + 0x27, 0x7c, 0x6a, 0xf4, 0x3c, 0x2b, 0xd8, 0x9d, 0x73, 0x9d, 0x80, 0xde, 0x0a, 0x26, 0xca, 0x5c, + 0xca, 0x4f, 0x64, 0x41, 0xaf, 0xba, 0x66, 0x2b, 0x49, 0xdd, 0x3c, 0xbd, 0xbf, 0x37, 0x79, 0x22, + 0x95, 0x88, 0x69, 0x4c, 0xe2, 0xc0, 0x49, 0xab, 0xa3, 0xb7, 0xe9, 0x6a, 0xcf, 0xb6, 0x5b, 0xd4, + 0xf0, 0x68, 0xe0, 0x4f, 0x54, 0xf8, 0x27, 0x5c, 0xca, 0xe2, 0xb3, 0xec, 0x1a, 0xba, 0x7d, 0x7d, + 0xe3, 0x35, 0x6a, 0x04, 0x48, 0x37, 0xa9, 0x47, 0x1d, 0x83, 0x36, 0x27, 0xe4, 0xc7, 0x9c, 0x5c, + 0x4a, 0x21, 0x61, 0x1f, 0x36, 0x59, 0x84, 0x53, 0x5d, 0xcf, 0x72, 0x79, 0x15, 0x6c, 0xdd, 0xf7, + 0xaf, 0xe9, 0x1d, 0x3a, 0x31, 0x72, 0xb1, 0x70, 0xa9, 0xde, 0x3c, 0x2b, 0x61, 0x4e, 0xad, 0xa6, + 0x09, 0xb0, 0xbf, 0x0c, 0xb9, 0x04, 0x35, 0x95, 0x38, 0x51, 0xbd, 0x58, 0xb8, 0x54, 0x11, 0x7d, + 0x47, 0x95, 0xc5, 0x30, 0x97, 0x2c, 0x40, 0x4d, 0xdf, 0xdc, 0xb4, 0x1c, 0x46, 0x59, 0xe3, 0x22, + 0x3c, 0x9f, 0xf5, 0x69, 0xb3, 0x92, 0x46, 0xe0, 0xa8, 0x37, 0x0c, 0xcb, 0x92, 0x17, 0x81, 0xf8, + 0xd4, 0xdb, 0xb1, 0x0c, 0x3a, 0x6b, 0x18, 0x6e, 0xcf, 0x09, 0x78, 0xdd, 0xeb, 0xbc, 0xee, 0xe7, + 0x64, 0xdd, 0x49, 0xab, 0x8f, 0x02, 0x33, 0x4a, 0x91, 0x17, 0xe0, 0xa4, 0x1c, 0x76, 0x91, 0x14, + 0x80, 0x23, 0x3d, 0xc8, 0x04, 0x89, 0xa9, 0x3c, 0xec, 0xa3, 0x26, 0x26, 0x9c, 0xd7, 0x7b, 0x81, + 0xdb, 0x61, 0x90, 0x49, 0xa6, 0x6b, 0xee, 0x36, 0x75, 0x26, 0x1a, 0x17, 0x0b, 0x97, 0x6a, 0xcd, + 0x8b, 0xfb, 0x7b, 0x93, 0xe7, 0x67, 0xef, 0x40, 0x87, 0x77, 0x44, 0x21, 0xd7, 0xa1, 0x6e, 0x3a, + 0xfe, 0xaa, 0x6b, 0x5b, 0xc6, 0xee, 0xc4, 0x28, 0xaf, 0xe0, 0x93, 0xf2, 0x53, 0xeb, 0xf3, 0xd7, + 0x5a, 0x22, 0xe3, 0xf6, 0xde, 0xe4, 0xf9, 0xfe, 0xd9, 0x71, 0x2a, 0xcc, 0xc7, 0x08, 0x83, 0xac, + 0x70, 0xc0, 0x39, 0xd7, 0xd9, 0xb4, 0xda, 0x13, 0x63, 0xbc, 0x35, 0x2e, 0x0e, 0xe8, 0xd0, 0xf3, + 0xd7, 0x5a, 0x82, 0xae, 0x39, 0x26, 0xd9, 0x89, 0x57, 
0x8c, 0x10, 0xce, 0x3d, 0x0f, 0xa7, 0xfa, + 0x46, 0x2d, 0x39, 0x09, 0xa5, 0x6d, 0xba, 0xcb, 0x27, 0xa5, 0x3a, 0xb2, 0x47, 0xf2, 0x20, 0x54, + 0x76, 0x74, 0xbb, 0x47, 0x27, 0x8a, 0x3c, 0x4d, 0xbc, 0x7c, 0xac, 0xf8, 0x5c, 0x41, 0xfb, 0xf5, + 0x12, 0x8c, 0xaa, 0xb9, 0xa0, 0x65, 0x39, 0xdb, 0xe4, 0x25, 0x28, 0xd9, 0x6e, 0x5b, 0xce, 0x68, + 0x1f, 0x1f, 0x7a, 0x7e, 0x59, 0x76, 0xdb, 0xcd, 0xea, 0xfe, 0xde, 0x64, 0x69, 0xd9, 0x6d, 0x23, + 0x43, 0x24, 0x06, 0x54, 0xb6, 0xf5, 0xcd, 0x6d, 0x9d, 0xd7, 0xa1, 0x31, 0xd3, 0x1c, 0x1a, 0xfa, + 0x2a, 0x43, 0x61, 0x75, 0x6d, 0xd6, 0xf7, 0xf7, 0x26, 0x2b, 0xfc, 0x15, 0x05, 0x36, 0x71, 0xa1, + 0xbe, 0x61, 0xeb, 0xc6, 0xf6, 0x96, 0x6b, 0xd3, 0x89, 0x52, 0x4e, 0x46, 0x4d, 0x85, 0x24, 0x1a, + 0x20, 0x7c, 0xc5, 0x88, 0x07, 0x31, 0x60, 0xa4, 0x67, 0xfa, 0x96, 0xb3, 0x2d, 0x67, 0xa7, 0xe7, + 0x87, 0xe6, 0xb6, 0x3e, 0xcf, 0xbf, 0x09, 0xf6, 0xf7, 0x26, 0x47, 0xc4, 0x33, 0x4a, 0x68, 0xed, + 0xfb, 0x0d, 0x18, 0x57, 0x8d, 0x74, 0x83, 0x7a, 0x01, 0xbd, 0x45, 0x2e, 0x42, 0xd9, 0x61, 0x83, + 0x86, 0x37, 0x72, 0x73, 0x54, 0xf6, 0xc9, 0x32, 0x1f, 0x2c, 0x3c, 0x87, 0xd5, 0x4c, 0x28, 0x5c, + 0x29, 0xf0, 0xe1, 0x6b, 0xd6, 0xe2, 0x30, 0xa2, 0x66, 0xe2, 0x19, 0x25, 0x34, 0x79, 0x05, 0xca, + 0xfc, 0xe3, 0x85, 0xa8, 0x3f, 0x31, 0x3c, 0x0b, 0xf6, 0xe9, 0x35, 0xf6, 0x05, 0xfc, 0xc3, 0x39, + 0x28, 0xeb, 0x8a, 0x3d, 0x73, 0x53, 0x0a, 0xf6, 0xe3, 0x39, 0x04, 0xbb, 0x20, 0xba, 0xe2, 0xfa, + 0xfc, 0x02, 0x32, 0x44, 0xf2, 0x8b, 0x05, 0x38, 0x65, 0xb8, 0x4e, 0xa0, 0x33, 0x23, 0x40, 0xa9, + 0xbf, 0x89, 0x0a, 0xe7, 0xf3, 0xe2, 0xd0, 0x7c, 0xe6, 0xd2, 0x88, 0xcd, 0x87, 0xd8, 0x6c, 0xde, + 0x97, 0x8c, 0xfd, 0xbc, 0xc9, 0x2f, 0x17, 0xe0, 0x21, 0x36, 0xcb, 0xf6, 0x11, 0x73, 0xdd, 0x70, + 0xb4, 0xb5, 0x3a, 0xbb, 0xbf, 0x37, 0xf9, 0xd0, 0x52, 0x16, 0x33, 0xcc, 0xae, 0x03, 0xab, 0xdd, + 0x69, 0xbd, 0xdf, 0x60, 0xe0, 0x7a, 0xa7, 0x31, 0xb3, 0x7c, 0x94, 0x46, 0x48, 0xf3, 0x11, 0xd9, + 0x95, 0xb3, 0x6c, 0x2e, 0xcc, 0xaa, 0x05, 0xb9, 0x0c, 0xd5, 0x1d, 0xd7, 0xee, 0x75, 0xa8, 
0x3f, + 0x51, 0xe3, 0x9a, 0xfb, 0x5c, 0xd6, 0x84, 0x7a, 0x83, 0x93, 0x34, 0x4f, 0x48, 0xf8, 0xaa, 0x78, + 0xf7, 0x51, 0x95, 0x25, 0x16, 0x8c, 0xd8, 0x56, 0xc7, 0x0a, 0x7c, 0xae, 0xd2, 0x1a, 0x33, 0x97, + 0x87, 0xfe, 0x2c, 0x31, 0x44, 0x97, 0x39, 0x98, 0x18, 0x35, 0xe2, 0x19, 0x25, 0x03, 0x36, 0x15, + 0xfa, 0x86, 0x6e, 0x0b, 0x95, 0xd7, 0x98, 0xf9, 0xe4, 0xf0, 0xc3, 0x86, 0xa1, 0x34, 0xc7, 0xe4, + 0x37, 0x55, 0xf8, 0x2b, 0x0a, 0x6c, 0xf2, 0x59, 0x18, 0x4f, 0xb4, 0xa6, 0x3f, 0xd1, 0xe0, 0xd2, + 0x79, 0x34, 0x4b, 0x3a, 0x21, 0x55, 0xf3, 0x8c, 0x04, 0x1b, 0x4f, 0xf4, 0x10, 0x1f, 0x53, 0x60, + 0xe4, 0x2a, 0xd4, 0x7c, 0xcb, 0xa4, 0x86, 0xee, 0xf9, 0x13, 0xa3, 0x07, 0x01, 0x3e, 0x29, 0x81, + 0x6b, 0x2d, 0x59, 0x0c, 0x43, 0x00, 0x32, 0x05, 0xd0, 0xd5, 0xbd, 0xc0, 0x12, 0x26, 0xe4, 0x18, + 0x37, 0x67, 0xc6, 0xf7, 0xf7, 0x26, 0x61, 0x35, 0x4c, 0xc5, 0x18, 0x05, 0xa3, 0x67, 0x65, 0x97, + 0x9c, 0x6e, 0x2f, 0xf0, 0x27, 0xc6, 0x2f, 0x96, 0x2e, 0xd5, 0x05, 0x7d, 0x2b, 0x4c, 0xc5, 0x18, + 0x05, 0xf9, 0x76, 0x01, 0x1e, 0x89, 0x5e, 0xfb, 0x07, 0xd9, 0x89, 0x23, 0x1f, 0x64, 0x93, 0xfb, + 0x7b, 0x93, 0x8f, 0xb4, 0x06, 0xb3, 0xc4, 0x3b, 0xd5, 0x47, 0x7b, 0x09, 0xc6, 0x66, 0x7b, 0xc1, + 0x96, 0xeb, 0x59, 0x6f, 0x70, 0x73, 0x98, 0x2c, 0x40, 0x25, 0xe0, 0x66, 0x8d, 0xd0, 0xcb, 0x8f, + 0x67, 0x89, 0x5a, 0x98, 0x98, 0x57, 0xe9, 0xae, 0xb2, 0x06, 0x84, 0x7e, 0x14, 0x66, 0x8e, 0x28, + 0xae, 0xfd, 0x5a, 0x01, 0xea, 0x4d, 0xdd, 0xb7, 0x0c, 0x06, 0x4f, 0xe6, 0xa0, 0xdc, 0xf3, 0xa9, + 0x77, 0x38, 0x50, 0x3e, 0x4b, 0xaf, 0xfb, 0xd4, 0x43, 0x5e, 0x98, 0x5c, 0x87, 0x5a, 0x57, 0xf7, + 0xfd, 0x9b, 0xae, 0x67, 0x4a, 0x4d, 0x73, 0x40, 0x20, 0x61, 0xaf, 0xca, 0xa2, 0x18, 0x82, 0x68, + 0x0d, 0x88, 0x54, 0xad, 0xf6, 0xc3, 0x02, 0x9c, 0x6e, 0xf6, 0x36, 0x37, 0xa9, 0x27, 0xcd, 0x33, + 0x61, 0xf8, 0x10, 0x0a, 0x15, 0x8f, 0x9a, 0x96, 0x2f, 0xeb, 0x3e, 0x3f, 0x74, 0xd3, 0x21, 0x43, + 0x91, 0x76, 0x16, 0x97, 0x17, 0x4f, 0x40, 0x81, 0x4e, 0x7a, 0x50, 0x7f, 0x8d, 0x06, 0x7e, 0xe0, + 0x51, 0xbd, 0x23, 0xbf, 0xee, 
0xca, 0xd0, 0xac, 0x5e, 0xa4, 0x41, 0x8b, 0x23, 0xc5, 0xcd, 0xba, + 0x30, 0x11, 0x23, 0x4e, 0xda, 0x1f, 0x56, 0x60, 0x74, 0xce, 0xed, 0x6c, 0x58, 0x0e, 0x35, 0x2f, + 0x9b, 0x6d, 0x4a, 0x5e, 0x85, 0x32, 0x35, 0xdb, 0x54, 0x7e, 0xed, 0xf0, 0x7a, 0x96, 0x81, 0x45, + 0xd6, 0x02, 0x7b, 0x43, 0x0e, 0x4c, 0x96, 0x61, 0x7c, 0xd3, 0x73, 0x3b, 0x62, 0xea, 0x5a, 0xdb, + 0xed, 0x4a, 0x53, 0xb1, 0xf9, 0xdf, 0xd4, 0x74, 0xb0, 0x90, 0xc8, 0xbd, 0xbd, 0x37, 0x09, 0xd1, + 0x1b, 0xa6, 0xca, 0x92, 0x97, 0x61, 0x22, 0x4a, 0x09, 0xc7, 0xf0, 0x1c, 0xb3, 0xab, 0xb9, 0xa9, + 0x50, 0x69, 0x9e, 0xdf, 0xdf, 0x9b, 0x9c, 0x58, 0x18, 0x40, 0x83, 0x03, 0x4b, 0x93, 0x37, 0x0b, + 0x70, 0x32, 0xca, 0x14, 0xf3, 0xaa, 0xb4, 0x10, 0x8e, 0x68, 0xc2, 0xe6, 0x0b, 0x90, 0x85, 0x14, + 0x0b, 0xec, 0x63, 0x4a, 0x16, 0x60, 0x34, 0x70, 0x63, 0xf2, 0xaa, 0x70, 0x79, 0x69, 0x6a, 0xc5, + 0xbc, 0xe6, 0x0e, 0x94, 0x56, 0xa2, 0x1c, 0x41, 0x38, 0xa3, 0xde, 0x53, 0x92, 0x1a, 0xe1, 0x92, + 0x3a, 0xb7, 0xbf, 0x37, 0x79, 0x66, 0x2d, 0x93, 0x02, 0x07, 0x94, 0x24, 0x5f, 0x2e, 0xc0, 0xb8, + 0xca, 0x92, 0x32, 0xaa, 0x1e, 0xa5, 0x8c, 0x08, 0xeb, 0x11, 0x6b, 0x09, 0x06, 0x98, 0x62, 0xa8, + 0xfd, 0xb8, 0x0c, 0xf5, 0x70, 0x66, 0x23, 0x8f, 0x41, 0x85, 0xaf, 0x85, 0xa5, 0xc1, 0x1a, 0xaa, + 0x2c, 0xbe, 0x64, 0x46, 0x91, 0x47, 0x1e, 0x87, 0xaa, 0xe1, 0x76, 0x3a, 0xba, 0x63, 0x72, 0xff, + 0x46, 0xbd, 0xd9, 0x60, 0x9a, 0x7a, 0x4e, 0x24, 0xa1, 0xca, 0x23, 0xe7, 0xa1, 0xac, 0x7b, 0x6d, + 0xe1, 0x6a, 0xa8, 0x8b, 0xf9, 0x68, 0xd6, 0x6b, 0xfb, 0xc8, 0x53, 0xc9, 0x47, 0xa1, 0x44, 0x9d, + 0x9d, 0x89, 0xf2, 0x60, 0x53, 0xe0, 0xb2, 0xb3, 0x73, 0x43, 0xf7, 0x9a, 0x0d, 0x59, 0x87, 0xd2, + 0x65, 0x67, 0x07, 0x59, 0x19, 0xb2, 0x0c, 0x55, 0xea, 0xec, 0xb0, 0xb6, 0x97, 0x3e, 0x80, 0x0f, + 0x0c, 0x28, 0xce, 0x48, 0xa4, 0x55, 0x1c, 0x1a, 0x14, 0x32, 0x19, 0x15, 0x04, 0xf9, 0x34, 0x8c, + 0x0a, 0xdb, 0x62, 0x85, 0xb5, 0x89, 0x3f, 0x31, 0xc2, 0x21, 0x27, 0x07, 0x1b, 0x27, 0x9c, 0x2e, + 0xf2, 0xb9, 0xc4, 0x12, 0x7d, 0x4c, 0x40, 0x91, 0x4f, 0x43, 0x5d, 
0xb9, 0xd3, 0x54, 0xcb, 0x66, + 0xba, 0x2b, 0x50, 0x12, 0x21, 0x7d, 0xbd, 0x67, 0x79, 0xb4, 0x43, 0x9d, 0xc0, 0x6f, 0x9e, 0x52, + 0x0b, 0x58, 0x95, 0xeb, 0x63, 0x84, 0x46, 0x36, 0xfa, 0xfd, 0x2e, 0xc2, 0x69, 0xf0, 0xd8, 0x80, + 0x59, 0x7d, 0x08, 0xa7, 0xcb, 0xe7, 0xe0, 0x44, 0xe8, 0x18, 0x91, 0x6b, 0x6b, 0xe1, 0x46, 0x78, + 0x8a, 0x15, 0x5f, 0x4a, 0x66, 0xdd, 0xde, 0x9b, 0x7c, 0x34, 0x63, 0x75, 0x1d, 0x11, 0x60, 0x1a, + 0x4c, 0xfb, 0xfd, 0x12, 0xf4, 0x9b, 0xdd, 0x49, 0xa1, 0x15, 0x8e, 0x5a, 0x68, 0xe9, 0x0f, 0x12, + 0xd3, 0xe7, 0x73, 0xb2, 0x58, 0xfe, 0x8f, 0xca, 0x6a, 0x98, 0xd2, 0x51, 0x37, 0xcc, 0xfd, 0x32, + 0x76, 0xb4, 0xb7, 0xca, 0x30, 0x3e, 0xaf, 0xd3, 0x8e, 0xeb, 0xdc, 0x75, 0x11, 0x52, 0xb8, 0x2f, + 0x16, 0x21, 0x97, 0xa0, 0xe6, 0xd1, 0xae, 0x6d, 0x19, 0xba, 0xcf, 0x9b, 0x5e, 0xba, 0xe3, 0x50, + 0xa6, 0x61, 0x98, 0x3b, 0x60, 0xf1, 0x59, 0xba, 0x2f, 0x17, 0x9f, 0xe5, 0x77, 0x7f, 0xf1, 0xa9, + 0x7d, 0xb9, 0x08, 0xdc, 0x50, 0x21, 0x17, 0xa1, 0xcc, 0x94, 0x70, 0xda, 0xe5, 0xc1, 0x3b, 0x0e, + 0xcf, 0x21, 0xe7, 0xa0, 0x18, 0xb8, 0x72, 0xe4, 0x81, 0xcc, 0x2f, 0xae, 0xb9, 0x58, 0x0c, 0x5c, + 0xf2, 0x06, 0x80, 0xe1, 0x3a, 0xa6, 0xa5, 0xbc, 0xd4, 0xf9, 0x3e, 0x6c, 0xc1, 0xf5, 0x6e, 0xea, + 0x9e, 0x39, 0x17, 0x22, 0x8a, 0xe5, 0x47, 0xf4, 0x8e, 0x31, 0x6e, 0xe4, 0x79, 0x18, 0x71, 0x9d, + 0x85, 0x9e, 0x6d, 0x73, 0x81, 0xd6, 0x9b, 0xff, 0x9d, 0xad, 0x09, 0xaf, 0xf3, 0x94, 0xdb, 0x7b, + 0x93, 0x67, 0x85, 0x7d, 0xcb, 0xde, 0x5e, 0xf2, 0xac, 0xc0, 0x72, 0xda, 0xad, 0xc0, 0xd3, 0x03, + 0xda, 0xde, 0x45, 0x59, 0x4c, 0xfb, 0x5a, 0x01, 0x1a, 0x0b, 0xd6, 0x2d, 0x6a, 0xbe, 0x64, 0x39, + 0xa6, 0x7b, 0x93, 0x20, 0x8c, 0xd8, 0xd4, 0x69, 0x07, 0x5b, 0xb2, 0xf7, 0x4f, 0xc5, 0xc6, 0x5a, + 0xb8, 0xb9, 0x11, 0xd5, 0xbf, 0x43, 0x03, 0x9d, 0x8d, 0xbe, 0xf9, 0x9e, 0x74, 0xbf, 0x8b, 0x45, + 0x29, 0x47, 0x40, 0x89, 0x44, 0xa6, 0xa1, 0x2e, 0xac, 0x4f, 0xcb, 0x69, 0x73, 0x19, 0xd6, 0xa2, + 0x49, 0xaf, 0xa5, 0x32, 0x30, 0xa2, 0xd1, 0x76, 0xe1, 0x54, 0x9f, 0x18, 0x88, 0x09, 0xe5, 0x40, + 0x6f, 
0xab, 0xf9, 0x75, 0x61, 0x68, 0x01, 0xaf, 0xe9, 0xed, 0x98, 0x70, 0xb9, 0x8e, 0x5f, 0xd3, + 0x99, 0x8e, 0x67, 0xe8, 0xda, 0x4f, 0x0b, 0x50, 0x5b, 0xe8, 0x39, 0x06, 0x5f, 0x1b, 0xdd, 0xdd, + 0x15, 0xa6, 0x0c, 0x86, 0x62, 0xa6, 0xc1, 0xd0, 0x83, 0x91, 0xed, 0x9b, 0xa1, 0x41, 0xd1, 0x98, + 0x59, 0x19, 0xbe, 0x57, 0xc8, 0x2a, 0x4d, 0x5d, 0xe5, 0x78, 0x62, 0x0f, 0x65, 0x5c, 0x56, 0x68, + 0xe4, 0xea, 0x4b, 0x9c, 0xa9, 0x64, 0x76, 0xee, 0xa3, 0xd0, 0x88, 0x91, 0x1d, 0xca, 0x69, 0xfb, + 0xdb, 0x65, 0x18, 0x59, 0x6c, 0xb5, 0x66, 0x57, 0x97, 0xc8, 0xd3, 0xd0, 0x90, 0xee, 0xf5, 0x6b, + 0x91, 0x0c, 0xc2, 0xdd, 0x95, 0x56, 0x94, 0x85, 0x71, 0x3a, 0x66, 0x8e, 0x79, 0x54, 0xb7, 0x3b, + 0x72, 0xb0, 0x84, 0xe6, 0x18, 0xb2, 0x44, 0x14, 0x79, 0x44, 0x87, 0x71, 0xb6, 0xc2, 0x63, 0x22, + 0x14, 0xab, 0x37, 0x39, 0x6c, 0x0e, 0xb8, 0xbe, 0xe3, 0x46, 0xe2, 0x7a, 0x02, 0x00, 0x53, 0x80, + 0xe4, 0x39, 0xa8, 0xe9, 0xbd, 0x60, 0x8b, 0x1b, 0xd0, 0x62, 0x6c, 0x9c, 0xe7, 0xbb, 0x0f, 0x32, + 0xed, 0xf6, 0xde, 0xe4, 0xe8, 0x55, 0x6c, 0x3e, 0xad, 0xde, 0x31, 0xa4, 0x66, 0x95, 0x53, 0x2b, + 0x46, 0x59, 0xb9, 0xca, 0xa1, 0x2b, 0xb7, 0x9a, 0x00, 0xc0, 0x14, 0x20, 0x79, 0x05, 0x46, 0xb7, + 0xe9, 0x6e, 0xa0, 0x6f, 0x48, 0x06, 0x23, 0x87, 0x61, 0x70, 0x92, 0x99, 0x70, 0x57, 0x63, 0xc5, + 0x31, 0x01, 0x46, 0x7c, 0x78, 0x70, 0x9b, 0x7a, 0x1b, 0xd4, 0x73, 0xe5, 0xea, 0x53, 0x32, 0xa9, + 0x1e, 0x86, 0xc9, 0xc4, 0xfe, 0xde, 0xe4, 0x83, 0x57, 0x33, 0x60, 0x30, 0x13, 0x5c, 0xfb, 0x49, + 0x11, 0x4e, 0x2c, 0x8a, 0xfd, 0x4d, 0xd7, 0x13, 0x4a, 0x98, 0x9c, 0x85, 0x92, 0xd7, 0xed, 0xf1, + 0x9e, 0x53, 0x12, 0x7e, 0x52, 0x5c, 0x5d, 0x47, 0x96, 0x46, 0x5e, 0x86, 0x9a, 0x29, 0xa7, 0x0c, + 0xb9, 0xf8, 0x3d, 0xec, 0x44, 0xc3, 0x95, 0xa0, 0x7a, 0xc3, 0x10, 0x8d, 0x59, 0xfa, 0x1d, 0xbf, + 0xdd, 0xb2, 0xde, 0xa0, 0x72, 0x3d, 0xc8, 0x2d, 0xfd, 0x15, 0x91, 0x84, 0x2a, 0x8f, 0x69, 0xd5, + 0x6d, 0xba, 0x2b, 0x56, 0x43, 0xe5, 0x48, 0xab, 0x5e, 0x95, 0x69, 0x18, 0xe6, 0x92, 0x49, 0x35, + 0x58, 0x58, 0x2f, 0x28, 0x8b, 0x95, 0xfc, 
0x0d, 0x96, 0x20, 0xc7, 0x0d, 0x9b, 0x32, 0x5f, 0xb3, + 0x82, 0x80, 0x7a, 0xb2, 0x19, 0x87, 0x9a, 0x32, 0x5f, 0xe4, 0x08, 0x28, 0x91, 0xc8, 0x87, 0xa0, + 0xce, 0xc1, 0x9b, 0xb6, 0xbb, 0xc1, 0x1b, 0xae, 0x2e, 0xd6, 0xf4, 0x37, 0x54, 0x22, 0x46, 0xf9, + 0xda, 0xcf, 0x8a, 0x70, 0x66, 0x91, 0x06, 0xc2, 0xaa, 0x99, 0xa7, 0x5d, 0xdb, 0xdd, 0x65, 0xa6, + 0x25, 0xd2, 0xd7, 0xc9, 0x0b, 0x00, 0x96, 0xbf, 0xd1, 0xda, 0x31, 0xf8, 0x38, 0x10, 0x63, 0xf8, + 0xa2, 0x1c, 0x92, 0xb0, 0xd4, 0x6a, 0xca, 0x9c, 0xdb, 0x89, 0x37, 0x8c, 0x95, 0x89, 0x96, 0x57, + 0xc5, 0x3b, 0x2c, 0xaf, 0x5a, 0x00, 0xdd, 0xc8, 0x40, 0x2d, 0x71, 0xca, 0xff, 0xa5, 0xd8, 0x1c, + 0xc6, 0x36, 0x8d, 0xc1, 0xe4, 0x31, 0x19, 0x1d, 0x38, 0x69, 0xd2, 0x4d, 0xbd, 0x67, 0x07, 0xa1, + 0x51, 0x2d, 0x07, 0xf1, 0xc1, 0xed, 0xf2, 0x70, 0xef, 0x75, 0x3e, 0x85, 0x84, 0x7d, 0xd8, 0xda, + 0xef, 0x94, 0xe0, 0xdc, 0x22, 0x0d, 0x42, 0x8f, 0x8b, 0x9c, 0x1d, 0x5b, 0x5d, 0x6a, 0xb0, 0x56, + 0x78, 0xb3, 0x00, 0x23, 0xb6, 0xbe, 0x41, 0x6d, 0xa6, 0xbd, 0xd8, 0xd7, 0xbc, 0x3a, 0xb4, 0x22, + 0x18, 0xcc, 0x65, 0x6a, 0x99, 0x73, 0x48, 0xa9, 0x06, 0x91, 0x88, 0x92, 0x3d, 0x9b, 0xd4, 0x0d, + 0xbb, 0xe7, 0x07, 0xd4, 0x5b, 0x75, 0xbd, 0x40, 0xda, 0x93, 0xe1, 0xa4, 0x3e, 0x17, 0x65, 0x61, + 0x9c, 0x8e, 0xcc, 0x00, 0x18, 0xb6, 0x45, 0x9d, 0x80, 0x97, 0x12, 0xe3, 0x8a, 0xa8, 0xf6, 0x9d, + 0x0b, 0x73, 0x30, 0x46, 0xc5, 0x58, 0x75, 0x5c, 0xc7, 0x0a, 0x5c, 0xc1, 0xaa, 0x9c, 0x64, 0xb5, + 0x12, 0x65, 0x61, 0x9c, 0x8e, 0x17, 0xa3, 0x81, 0x67, 0x19, 0x3e, 0x2f, 0x56, 0x49, 0x15, 0x8b, + 0xb2, 0x30, 0x4e, 0xc7, 0x74, 0x5e, 0xec, 0xfb, 0x0f, 0xa5, 0xf3, 0xbe, 0x55, 0x87, 0x0b, 0x09, + 0xb1, 0x06, 0x7a, 0x40, 0x37, 0x7b, 0x76, 0x8b, 0x06, 0xaa, 0x01, 0x87, 0xd4, 0x85, 0x3f, 0x17, + 0xb5, 0xbb, 0x88, 0xaa, 0x30, 0x8e, 0xa6, 0xdd, 0xfb, 0x2a, 0x78, 0xa0, 0xb6, 0x9f, 0x86, 0xba, + 0xa3, 0x07, 0x3e, 0x1f, 0xb8, 0x72, 0x8c, 0x86, 0x66, 0xd8, 0x35, 0x95, 0x81, 0x11, 0x0d, 0x59, + 0x85, 0x07, 0xa5, 0x88, 0x2f, 0xdf, 0xea, 0xba, 0x5e, 0x40, 0x3d, 0x51, 0x56, 
0xaa, 0x53, 0x59, + 0xf6, 0xc1, 0x95, 0x0c, 0x1a, 0xcc, 0x2c, 0x49, 0x56, 0xe0, 0xb4, 0x21, 0x76, 0x9a, 0xa9, 0xed, + 0xea, 0xa6, 0x02, 0x14, 0x0e, 0xae, 0x70, 0x69, 0x34, 0xd7, 0x4f, 0x82, 0x59, 0xe5, 0xd2, 0xbd, + 0x79, 0x64, 0xa8, 0xde, 0x5c, 0x1d, 0xa6, 0x37, 0xd7, 0x86, 0xeb, 0xcd, 0xf5, 0x83, 0xf5, 0x66, + 0x26, 0x79, 0xd6, 0x8f, 0xa8, 0xc7, 0xcc, 0x13, 0xa1, 0x61, 0x63, 0x81, 0x0c, 0xa1, 0xe4, 0x5b, + 0x19, 0x34, 0x98, 0x59, 0x92, 0x6c, 0xc0, 0x39, 0x91, 0x7e, 0xd9, 0x31, 0xbc, 0xdd, 0x2e, 0x53, + 0x3c, 0x31, 0xdc, 0x46, 0xc2, 0xc3, 0x78, 0xae, 0x35, 0x90, 0x12, 0xef, 0x80, 0x42, 0xfe, 0x37, + 0x8c, 0x89, 0x56, 0x5a, 0xd1, 0xbb, 0x1c, 0x56, 0x84, 0x35, 0x3c, 0x24, 0x61, 0xc7, 0xe6, 0xe2, + 0x99, 0x98, 0xa4, 0x25, 0xb3, 0x70, 0xa2, 0xbb, 0x63, 0xb0, 0xc7, 0xa5, 0xcd, 0x6b, 0x94, 0x9a, + 0xd4, 0xe4, 0xbb, 0x35, 0xf5, 0xe6, 0xc3, 0xca, 0xd1, 0xb1, 0x9a, 0xcc, 0xc6, 0x34, 0x3d, 0x79, + 0x0e, 0x46, 0xfd, 0x40, 0xf7, 0x02, 0xe9, 0xd6, 0x9b, 0x18, 0x17, 0x61, 0x1f, 0xca, 0xeb, 0xd5, + 0x8a, 0xe5, 0x61, 0x82, 0x32, 0x53, 0x5f, 0x9c, 0x38, 0x3e, 0x7d, 0x91, 0x67, 0xb6, 0xfa, 0x93, + 0x22, 0x5c, 0x5c, 0xa4, 0xc1, 0x8a, 0xeb, 0x48, 0xa7, 0x68, 0x96, 0xda, 0x3f, 0x90, 0x4f, 0x34, + 0xa9, 0xb4, 0x8b, 0x47, 0xaa, 0xb4, 0x4b, 0x47, 0xa4, 0xb4, 0xcb, 0xc7, 0xa8, 0xb4, 0x7f, 0xaf, + 0x08, 0x0f, 0x27, 0x24, 0xb9, 0xea, 0x9a, 0x6a, 0xc2, 0x7f, 0x5f, 0x80, 0x07, 0x10, 0xe0, 0x6d, + 0x61, 0x77, 0xf2, 0x6d, 0xad, 0x94, 0xc5, 0xf3, 0xd5, 0xb4, 0xc5, 0xf3, 0x4a, 0x1e, 0xcd, 0x97, + 0xc1, 0xe1, 0x40, 0x1a, 0xef, 0x45, 0x20, 0x9e, 0xdc, 0x84, 0x13, 0xae, 0x9f, 0x98, 0xd1, 0x13, + 0xc6, 0x95, 0x61, 0x1f, 0x05, 0x66, 0x94, 0x22, 0x2d, 0x78, 0xc8, 0xa7, 0x4e, 0x60, 0x39, 0xd4, + 0x4e, 0xc2, 0x09, 0x6b, 0xe8, 0x51, 0x09, 0xf7, 0x50, 0x2b, 0x8b, 0x08, 0xb3, 0xcb, 0xe6, 0x99, + 0x07, 0xfe, 0x1c, 0xb8, 0xc9, 0x29, 0x44, 0x73, 0x64, 0x16, 0xcb, 0x9b, 0x69, 0x8b, 0xe5, 0xd5, + 0xfc, 0xed, 0x36, 0x9c, 0xb5, 0x32, 0x03, 0xc0, 0x5b, 0x21, 0x6e, 0xae, 0x84, 0x4a, 0x1a, 0xc3, + 0x1c, 0x8c, 0x51, 
0x31, 0x05, 0xa4, 0xe4, 0x1c, 0xb7, 0x54, 0x42, 0x05, 0xd4, 0x8a, 0x67, 0x62, + 0x92, 0x76, 0xa0, 0xb5, 0x53, 0x19, 0xda, 0xda, 0x79, 0x11, 0x48, 0xc2, 0xf1, 0x28, 0xf0, 0x46, + 0x92, 0x61, 0x8d, 0x4b, 0x7d, 0x14, 0x98, 0x51, 0x6a, 0x40, 0x57, 0xae, 0x1e, 0x6d, 0x57, 0xae, + 0x0d, 0xdf, 0x95, 0xc9, 0xab, 0x70, 0x96, 0xb3, 0x92, 0xf2, 0x49, 0x02, 0x0b, 0xbb, 0xe7, 0x03, + 0x12, 0xf8, 0x2c, 0x0e, 0x22, 0xc4, 0xc1, 0x18, 0xac, 0x7d, 0x0c, 0x8f, 0x9a, 0x8c, 0xb9, 0x6e, + 0x0f, 0xb6, 0x89, 0xe6, 0x32, 0x68, 0x30, 0xb3, 0x24, 0xeb, 0x62, 0x01, 0xeb, 0x86, 0xfa, 0x86, + 0x4d, 0x4d, 0x19, 0xd6, 0x19, 0x76, 0xb1, 0xb5, 0xe5, 0x96, 0xcc, 0xc1, 0x18, 0x55, 0x96, 0x99, + 0x32, 0x7a, 0x48, 0x33, 0x65, 0x91, 0x7b, 0xe9, 0x37, 0x13, 0xd6, 0x90, 0xb4, 0x75, 0xc2, 0x40, + 0xdd, 0xb9, 0x34, 0x01, 0xf6, 0x97, 0xe1, 0x56, 0xa2, 0xe1, 0x59, 0xdd, 0xc0, 0x4f, 0x62, 0x8d, + 0xa7, 0xac, 0xc4, 0x0c, 0x1a, 0xcc, 0x2c, 0xc9, 0xec, 0xf3, 0x2d, 0xaa, 0xdb, 0xc1, 0x56, 0x12, + 0xf0, 0x44, 0xd2, 0x3e, 0xbf, 0xd2, 0x4f, 0x82, 0x59, 0xe5, 0x32, 0x15, 0xd2, 0xc9, 0xfb, 0xd3, + 0xac, 0xfa, 0x4a, 0x09, 0xce, 0x2e, 0xd2, 0x20, 0x8c, 0xab, 0x79, 0xdf, 0x8d, 0xf2, 0x2e, 0xb8, + 0x51, 0xbe, 0x59, 0x81, 0xd3, 0x8b, 0x34, 0xe8, 0xb3, 0xc6, 0xfe, 0x8b, 0x8a, 0x7f, 0x05, 0x4e, + 0x47, 0xa1, 0x5c, 0xad, 0xc0, 0xf5, 0x84, 0x2e, 0x4f, 0xad, 0x96, 0x5b, 0xfd, 0x24, 0x98, 0x55, + 0x8e, 0x7c, 0x1a, 0x1e, 0xe6, 0xaa, 0xde, 0x69, 0x0b, 0xff, 0xac, 0x70, 0x26, 0xc4, 0x8e, 0x09, + 0x4c, 0x4a, 0xc8, 0x87, 0x5b, 0xd9, 0x64, 0x38, 0xa8, 0x3c, 0xf9, 0x22, 0x8c, 0x76, 0xad, 0x2e, + 0xb5, 0x2d, 0x87, 0xdb, 0x67, 0xb9, 0x43, 0x42, 0x56, 0x63, 0x60, 0xd1, 0x02, 0x2e, 0x9e, 0x8a, + 0x09, 0x86, 0x99, 0x3d, 0xb5, 0x76, 0x8c, 0x3d, 0xf5, 0xdf, 0x8a, 0x50, 0x5d, 0xf4, 0xdc, 0x5e, + 0xb7, 0xb9, 0x4b, 0xda, 0x30, 0x72, 0x93, 0x6f, 0x9e, 0xc9, 0xad, 0xa9, 0xe1, 0xc3, 0xa1, 0xc5, + 0x1e, 0x5c, 0x64, 0x12, 0x89, 0x77, 0x94, 0xf0, 0xac, 0x13, 0x6f, 0xd3, 0x5d, 0x6a, 0xca, 0x3d, + 0xb4, 0xb0, 0x13, 0x5f, 0x65, 0x89, 0x28, 0xf2, 0x48, 
0x07, 0x4e, 0xe8, 0xb6, 0xed, 0xde, 0xa4, + 0xe6, 0xb2, 0x1e, 0x50, 0x87, 0xfa, 0x6a, 0x4b, 0xf2, 0xb0, 0x6e, 0x69, 0xbe, 0xaf, 0x3f, 0x9b, + 0x84, 0xc2, 0x34, 0x36, 0x79, 0x0d, 0xaa, 0x7e, 0xe0, 0x7a, 0xca, 0xd8, 0x6a, 0xcc, 0xcc, 0x0d, + 0xdf, 0xe8, 0xcd, 0x4f, 0xb5, 0x04, 0x94, 0xf0, 0xd9, 0xcb, 0x17, 0x54, 0x0c, 0xb4, 0x6f, 0x14, + 0x00, 0xae, 0xac, 0xad, 0xad, 0xca, 0xed, 0x05, 0x13, 0xca, 0x7a, 0x2f, 0xdc, 0xa8, 0x1c, 0x7e, + 0x43, 0x30, 0x11, 0x0f, 0x29, 0xf7, 0xf0, 0x7a, 0xc1, 0x16, 0x72, 0x74, 0xf2, 0x41, 0xa8, 0x4a, + 0x03, 0x59, 0x8a, 0x3d, 0x0c, 0x2d, 0x90, 0x46, 0x34, 0xaa, 0x7c, 0xed, 0xb7, 0x8a, 0x00, 0x4b, + 0xa6, 0x4d, 0x5b, 0x2a, 0x82, 0xbd, 0x1e, 0x6c, 0x79, 0xd4, 0xdf, 0x72, 0x6d, 0x73, 0xc8, 0xdd, + 0x54, 0xee, 0xf3, 0x5f, 0x53, 0x20, 0x18, 0xe1, 0x11, 0x13, 0x46, 0xfd, 0x80, 0x76, 0x97, 0x9c, + 0x80, 0x7a, 0x3b, 0xba, 0x3d, 0xe4, 0x26, 0xca, 0x49, 0xe1, 0x17, 0x89, 0x70, 0x30, 0x81, 0x4a, + 0x74, 0x68, 0x58, 0x8e, 0x21, 0x06, 0x48, 0x73, 0x77, 0xc8, 0x8e, 0x74, 0x82, 0xad, 0x38, 0x96, + 0x22, 0x18, 0x8c, 0x63, 0x6a, 0x3f, 0x28, 0xc2, 0x19, 0xce, 0x8f, 0x55, 0x23, 0x11, 0x8f, 0x49, + 0xfe, 0x6f, 0xdf, 0x39, 0xb8, 0xff, 0x79, 0x30, 0xd6, 0xe2, 0x18, 0xd5, 0x0a, 0x0d, 0xf4, 0xc8, + 0x9e, 0x8b, 0xd2, 0x62, 0x87, 0xdf, 0x7a, 0x50, 0xf6, 0xd9, 0x7c, 0x25, 0xa4, 0xd7, 0x1a, 0xba, + 0x0b, 0x65, 0x7f, 0x00, 0x9f, 0xbd, 0xc2, 0x5d, 0x63, 0x3e, 0x6b, 0x71, 0x76, 0xe4, 0x0b, 0x30, + 0xe2, 0x07, 0x7a, 0xd0, 0x53, 0x43, 0x73, 0xfd, 0xa8, 0x19, 0x73, 0xf0, 0x68, 0x1e, 0x11, 0xef, + 0x28, 0x99, 0x6a, 0x3f, 0x28, 0xc0, 0xb9, 0xec, 0x82, 0xcb, 0x96, 0x1f, 0x90, 0xff, 0xd3, 0x27, + 0xf6, 0x03, 0xb6, 0x38, 0x2b, 0xcd, 0x85, 0x1e, 0x06, 0x64, 0xab, 0x94, 0x98, 0xc8, 0x03, 0xa8, + 0x58, 0x01, 0xed, 0xa8, 0xf5, 0xe5, 0xf5, 0x23, 0xfe, 0xf4, 0x98, 0x6a, 0x67, 0x5c, 0x50, 0x30, + 0xd3, 0xde, 0x2a, 0x0e, 0xfa, 0x64, 0xae, 0x3e, 0xec, 0x64, 0xcc, 0xef, 0xd5, 0x7c, 0x31, 0xbf, + 0xc9, 0x0a, 0xf5, 0x87, 0xfe, 0xfe, 0xbf, 0xfe, 0xd0, 0xdf, 0xeb, 0xf9, 0x43, 0x7f, 0x53, 
0x62, + 0x18, 0x18, 0x01, 0xfc, 0x4e, 0x09, 0xce, 0xdf, 0xa9, 0xdb, 0x30, 0x7d, 0x26, 0x7b, 0x67, 0x5e, + 0x7d, 0x76, 0xe7, 0x7e, 0x48, 0x66, 0xa0, 0xd2, 0xdd, 0xd2, 0x7d, 0x65, 0x94, 0xa9, 0x05, 0x4b, + 0x65, 0x95, 0x25, 0xde, 0x66, 0x93, 0x06, 0x37, 0xe6, 0xf8, 0x2b, 0x0a, 0x52, 0x36, 0x1d, 0x77, + 0xa8, 0xef, 0x47, 0x3e, 0x81, 0x70, 0x3a, 0x5e, 0x11, 0xc9, 0xa8, 0xf2, 0x49, 0x00, 0x23, 0xc2, + 0xc5, 0x2c, 0x35, 0xd3, 0xf0, 0x81, 0x5c, 0x19, 0x61, 0xe2, 0xd1, 0x47, 0xc9, 0xdd, 0x0a, 0xc9, + 0x8b, 0x4c, 0x41, 0x39, 0x88, 0x82, 0x76, 0xd5, 0xd2, 0xbc, 0x9c, 0x61, 0x9f, 0x72, 0x3a, 0xb6, + 0xb0, 0x77, 0x37, 0xb8, 0x53, 0xdd, 0x94, 0xfb, 0xe7, 0x96, 0xeb, 0x70, 0x83, 0xac, 0x14, 0x2d, + 0xec, 0xaf, 0xf7, 0x51, 0x60, 0x46, 0x29, 0xed, 0xaf, 0x6a, 0x70, 0x26, 0xbb, 0x3f, 0x30, 0xb9, + 0xed, 0x50, 0xcf, 0x67, 0xd8, 0x85, 0xa4, 0xdc, 0x6e, 0x88, 0x64, 0x54, 0xf9, 0xef, 0xe9, 0x80, + 0xb3, 0x6f, 0x16, 0xe0, 0xac, 0x27, 0xf7, 0x88, 0xee, 0x45, 0xd0, 0xd9, 0xa3, 0xc2, 0x9d, 0x31, + 0x80, 0x21, 0x0e, 0xae, 0x0b, 0xf9, 0x8d, 0x02, 0x4c, 0x74, 0x52, 0x7e, 0x8e, 0x63, 0x3c, 0x30, + 0xc6, 0xa3, 0xe2, 0x57, 0x06, 0xf0, 0xc3, 0x81, 0x35, 0x21, 0x5f, 0x84, 0x46, 0x97, 0xf5, 0x0b, + 0x3f, 0xa0, 0x8e, 0xa1, 0xce, 0x8c, 0x0d, 0x3f, 0x92, 0x56, 0x23, 0x2c, 0x15, 0x8a, 0x26, 0xec, + 0x83, 0x58, 0x06, 0xc6, 0x39, 0xde, 0xe7, 0x27, 0xc4, 0x2e, 0x41, 0xcd, 0xa7, 0x41, 0x60, 0x39, + 0x6d, 0xb1, 0xde, 0xa8, 0x8b, 0xb1, 0xd2, 0x92, 0x69, 0x18, 0xe6, 0x92, 0x0f, 0x41, 0x9d, 0x6f, + 0x39, 0xcd, 0x7a, 0x6d, 0x7f, 0xa2, 0xce, 0xc3, 0xc5, 0xc6, 0x44, 0x00, 0x9c, 0x4c, 0xc4, 0x28, + 0x9f, 0x3c, 0x05, 0xa3, 0x1b, 0x7c, 0xf8, 0xca, 0xe3, 0xbc, 0xc2, 0xc7, 0xc5, 0xad, 0xb5, 0x66, + 0x2c, 0x1d, 0x13, 0x54, 0x64, 0x06, 0x80, 0x86, 0xfb, 0x72, 0x69, 0x7f, 0x56, 0xb4, 0x63, 0x87, + 0x31, 0x2a, 0xf2, 0x28, 0x94, 0x02, 0xdb, 0xe7, 0x3e, 0xac, 0x5a, 0xb4, 0x04, 0x5d, 0x5b, 0x6e, + 0x21, 0x4b, 0xd7, 0x7e, 0x56, 0x80, 0x13, 0xa9, 0xc3, 0x25, 0xac, 0x48, 0xcf, 0xb3, 0xe5, 0x34, + 0x12, 0x16, 0x59, 0xc7, 0x65, 
0x64, 0xe9, 0xe4, 0x55, 0x69, 0x96, 0x17, 0x73, 0xde, 0x5c, 0x70, + 0x4d, 0x0f, 0x7c, 0x66, 0x87, 0xf7, 0x59, 0xe4, 0x7c, 0x9b, 0x2f, 0xaa, 0x8f, 0xd4, 0x03, 0xb1, + 0x6d, 0xbe, 0x28, 0x0f, 0x13, 0x94, 0x29, 0x87, 0x5f, 0xf9, 0x20, 0x0e, 0x3f, 0xed, 0x6b, 0xc5, + 0x98, 0x04, 0xa4, 0x65, 0x7f, 0x17, 0x09, 0x3c, 0xc1, 0x14, 0x68, 0xa8, 0xdc, 0xeb, 0x71, 0xfd, + 0xc7, 0x95, 0xb1, 0xcc, 0x25, 0x2f, 0x09, 0xd9, 0x97, 0x72, 0x9e, 0x42, 0x5d, 0x5b, 0x6e, 0x89, + 0xe8, 0x2a, 0xd5, 0x6a, 0x61, 0x13, 0x94, 0x8f, 0xa9, 0x09, 0xb4, 0x3f, 0x2b, 0x41, 0xe3, 0x45, + 0x77, 0xe3, 0x3d, 0x12, 0x41, 0x9d, 0xad, 0xa6, 0x8a, 0xef, 0xa2, 0x9a, 0x5a, 0x87, 0x87, 0x83, + 0xc0, 0x6e, 0x51, 0xc3, 0x75, 0x4c, 0x7f, 0x76, 0x33, 0xa0, 0xde, 0x82, 0xe5, 0x58, 0xfe, 0x16, + 0x35, 0xe5, 0x76, 0xd2, 0x23, 0xfb, 0x7b, 0x93, 0x0f, 0xaf, 0xad, 0x2d, 0x67, 0x91, 0xe0, 0xa0, + 0xb2, 0x7c, 0xda, 0xd0, 0x8d, 0x6d, 0x77, 0x73, 0x93, 0x9f, 0x94, 0x91, 0x31, 0x37, 0x62, 0xda, + 0x88, 0xa5, 0x63, 0x82, 0x4a, 0x7b, 0xbb, 0x08, 0xf5, 0xf0, 0xe4, 0x3b, 0x79, 0x1c, 0xaa, 0x1b, + 0x9e, 0xbb, 0x4d, 0x3d, 0xb1, 0x73, 0x27, 0x4f, 0xca, 0x34, 0x45, 0x12, 0xaa, 0x3c, 0xf2, 0x18, + 0x54, 0x02, 0xb7, 0x6b, 0x19, 0x69, 0x87, 0xda, 0x1a, 0x4b, 0x44, 0x91, 0x77, 0x7c, 0x1d, 0xfc, + 0x89, 0x84, 0x69, 0x57, 0x1f, 0x68, 0x8c, 0xbd, 0x02, 0x65, 0x5f, 0xf7, 0x6d, 0xa9, 0x4f, 0x73, + 0x1c, 0x22, 0x9f, 0x6d, 0x2d, 0xcb, 0x43, 0xe4, 0xb3, 0xad, 0x65, 0xe4, 0xa0, 0xda, 0x8f, 0x8b, + 0xd0, 0x10, 0x72, 0x13, 0xb3, 0xc2, 0x51, 0x4a, 0xee, 0x79, 0x1e, 0x4a, 0xe1, 0xf7, 0x3a, 0xd4, + 0xe3, 0x6e, 0x26, 0x39, 0xc9, 0xc5, 0xf7, 0x07, 0xa2, 0xcc, 0x30, 0x9c, 0x22, 0x4a, 0x52, 0xa2, + 0x2f, 0x1f, 0xa3, 0xe8, 0x2b, 0x07, 0x12, 0xfd, 0xc8, 0x71, 0x88, 0xfe, 0xcd, 0x22, 0xd4, 0x97, + 0xad, 0x4d, 0x6a, 0xec, 0x1a, 0x36, 0x3f, 0x13, 0x68, 0x52, 0x9b, 0x06, 0x74, 0xd1, 0xd3, 0x0d, + 0xba, 0x4a, 0x3d, 0x8b, 0xdf, 0xd9, 0xc2, 0xc6, 0x07, 0x9f, 0x81, 0xe4, 0x99, 0xc0, 0xf9, 0x01, + 0x34, 0x38, 0xb0, 0x34, 0x59, 0x82, 0x51, 0x93, 0xfa, 0x96, 0x47, 
0xcd, 0xd5, 0xd8, 0x42, 0xe5, + 0x71, 0xa5, 0x6a, 0xe6, 0x63, 0x79, 0xb7, 0xf7, 0x26, 0xc7, 0x94, 0x83, 0x52, 0xac, 0x58, 0x12, + 0x45, 0xd9, 0x90, 0xef, 0xea, 0x3d, 0x3f, 0xab, 0x8e, 0xb1, 0x21, 0xbf, 0x9a, 0x4d, 0x82, 0x83, + 0xca, 0x6a, 0x15, 0x28, 0x2d, 0xbb, 0x6d, 0xed, 0xad, 0x12, 0x84, 0x97, 0xfb, 0x90, 0xff, 0x5f, + 0x80, 0x86, 0xee, 0x38, 0x6e, 0x20, 0x2f, 0xce, 0x11, 0x3b, 0xf0, 0x98, 0xfb, 0x0e, 0xa1, 0xa9, + 0xd9, 0x08, 0x54, 0x6c, 0xde, 0x86, 0x1b, 0xca, 0xb1, 0x1c, 0x8c, 0xf3, 0x26, 0xbd, 0xd4, 0x7e, + 0xf2, 0x4a, 0xfe, 0x5a, 0x1c, 0x60, 0xf7, 0xf8, 0xdc, 0x27, 0xe1, 0x64, 0xba, 0xb2, 0x87, 0xd9, + 0x0e, 0xca, 0xb5, 0x31, 0x5f, 0x04, 0x88, 0x62, 0x4a, 0xee, 0x81, 0x13, 0xcb, 0x4a, 0x38, 0xb1, + 0x16, 0x87, 0x17, 0x70, 0x58, 0xe9, 0x81, 0x8e, 0xab, 0xd7, 0x53, 0x8e, 0xab, 0xa5, 0xa3, 0x60, + 0x76, 0x67, 0x67, 0xd5, 0x6f, 0x16, 0xe0, 0x64, 0x44, 0x2c, 0x4f, 0xc8, 0x3e, 0x0b, 0x63, 0x1e, + 0xd5, 0xcd, 0xa6, 0x1e, 0x18, 0x5b, 0x3c, 0xd4, 0xbb, 0xc0, 0x63, 0xb3, 0x4f, 0xed, 0xef, 0x4d, + 0x8e, 0x61, 0x3c, 0x03, 0x93, 0x74, 0x44, 0x87, 0x06, 0x4b, 0x58, 0xb3, 0x3a, 0xd4, 0xed, 0x05, + 0x43, 0x7a, 0x4d, 0xf9, 0x82, 0x05, 0x23, 0x18, 0x8c, 0x63, 0x6a, 0xef, 0x14, 0x60, 0x3c, 0x5e, + 0xe1, 0x63, 0xf7, 0xa8, 0x6d, 0x25, 0x3d, 0x6a, 0x73, 0x47, 0xd0, 0x26, 0x03, 0xbc, 0x68, 0x3f, + 0xa9, 0xc5, 0x3f, 0x8d, 0x7b, 0xce, 0xe2, 0xce, 0x82, 0xc2, 0x1d, 0x9d, 0x05, 0xef, 0xfd, 0x5b, + 0x63, 0x06, 0x59, 0xb9, 0xe5, 0xfb, 0xd8, 0xca, 0x7d, 0x37, 0xaf, 0x9e, 0x89, 0x5d, 0x9f, 0x32, + 0x92, 0xe3, 0xfa, 0x94, 0x4e, 0x78, 0x7d, 0x4a, 0xf5, 0xc8, 0x26, 0x9d, 0x83, 0x5c, 0xa1, 0x52, + 0xbb, 0xa7, 0x57, 0xa8, 0xd4, 0x8f, 0xeb, 0x0a, 0x15, 0xc8, 0x7b, 0x85, 0xca, 0x57, 0x0b, 0x30, + 0x6e, 0x26, 0x4e, 0xcc, 0x72, 0xdf, 0x42, 0x1e, 0x55, 0x93, 0x3c, 0x80, 0x2b, 0x8e, 0x4c, 0x25, + 0xd3, 0x30, 0xc5, 0x52, 0xfb, 0x51, 0x39, 0xae, 0x07, 0xee, 0xb5, 0xab, 0xfa, 0x99, 0xa4, 0xab, + 0xfa, 0x62, 0xda, 0x55, 0x7d, 0x22, 0x16, 0x45, 0x1a, 0x77, 0x57, 0x7f, 0x38, 0x36, 0x3d, 0xb2, + 0x39, 
0x69, 0x2c, 0x92, 0x74, 0xc6, 0x14, 0xf9, 0x61, 0xa8, 0xf9, 0xea, 0x1a, 0x46, 0xb1, 0xb0, + 0x89, 0xda, 0x45, 0x5d, 0x91, 0x18, 0x52, 0x30, 0x4b, 0xdc, 0xa3, 0xba, 0xef, 0x3a, 0x69, 0x4b, + 0x1c, 0x79, 0x2a, 0xca, 0xdc, 0xb8, 0xcb, 0x7c, 0xe4, 0x2e, 0x2e, 0x73, 0x1d, 0x1a, 0xb6, 0xee, + 0x07, 0xeb, 0x5d, 0x53, 0x0f, 0xa8, 0x29, 0xc7, 0xdb, 0xff, 0x38, 0x98, 0xae, 0x62, 0xfa, 0x2f, + 0x32, 0x08, 0x97, 0x23, 0x18, 0x8c, 0x63, 0x12, 0x13, 0x46, 0xd9, 0x2b, 0x1f, 0x0d, 0xe6, 0xac, + 0xba, 0x02, 0xe0, 0x30, 0x3c, 0x42, 0x4f, 0xcf, 0x72, 0x0c, 0x07, 0x13, 0xa8, 0x03, 0xbc, 0xea, + 0xf5, 0xa1, 0xbc, 0xea, 0x5f, 0xad, 0x43, 0xe3, 0x9a, 0x1e, 0x58, 0x3b, 0x94, 0xef, 0xe2, 0x1c, + 0x8f, 0x2b, 0xfd, 0x57, 0x0a, 0x70, 0x26, 0x19, 0xaa, 0x77, 0x8c, 0xfe, 0x74, 0x7e, 0xf1, 0x07, + 0x66, 0x72, 0xc3, 0x01, 0xb5, 0xe0, 0x9e, 0xf5, 0xbe, 0xc8, 0xbf, 0xe3, 0xf6, 0xac, 0xb7, 0x06, + 0x31, 0xc4, 0xc1, 0x75, 0x79, 0xaf, 0x78, 0xd6, 0xef, 0xef, 0x8b, 0xd9, 0x52, 0x7e, 0xff, 0xea, + 0x7d, 0xe3, 0xf7, 0xaf, 0xdd, 0x17, 0xc6, 0x56, 0x37, 0xe6, 0xf7, 0xaf, 0xe7, 0x8c, 0x3f, 0x91, + 0xd1, 0xed, 0x02, 0x6d, 0xd0, 0xfe, 0x01, 0x3f, 0x98, 0xae, 0xfc, 0xb1, 0xcc, 0x46, 0xd9, 0xd0, + 0x7d, 0xcb, 0x90, 0x6a, 0x2f, 0xc7, 0x45, 0x94, 0xea, 0xc6, 0x2e, 0xb1, 0x4d, 0xcd, 0x5f, 0x51, + 0x60, 0x47, 0x37, 0x83, 0x15, 0x73, 0xdd, 0x0c, 0x46, 0xe6, 0xa0, 0xec, 0xb0, 0xd5, 0x73, 0xe9, + 0xd0, 0x77, 0x81, 0x5d, 0xbb, 0x4a, 0x77, 0x91, 0x17, 0xd6, 0xde, 0x2e, 0x02, 0xb0, 0xcf, 0x3f, + 0x98, 0x07, 0xfe, 0x83, 0x50, 0xf5, 0x7b, 0x7c, 0xad, 0x2c, 0x15, 0x76, 0x14, 0xb4, 0x23, 0x92, + 0x51, 0xe5, 0x93, 0xc7, 0xa0, 0xf2, 0x7a, 0x8f, 0xf6, 0xd4, 0x76, 0x72, 0x68, 0xae, 0x7d, 0x8a, + 0x25, 0xa2, 0xc8, 0x3b, 0x3e, 0x6f, 0x9a, 0xf2, 0xd4, 0x57, 0x8e, 0xcb, 0x53, 0x5f, 0x87, 0xea, + 0x35, 0x97, 0xc7, 0x00, 0x6a, 0xff, 0x5c, 0x04, 0x88, 0x62, 0xac, 0xc8, 0x37, 0x0a, 0xf0, 0x50, + 0x38, 0xe0, 0x02, 0x61, 0x75, 0xcf, 0xd9, 0xba, 0xd5, 0xc9, 0xed, 0xb5, 0xcf, 0x1a, 0xec, 0x7c, + 0x06, 0x5a, 0xcd, 0x62, 0x87, 0xd9, 0xb5, 
0x20, 0x08, 0x35, 0xda, 0xe9, 0x06, 0xbb, 0xf3, 0x96, + 0x27, 0x7b, 0x60, 0x66, 0x28, 0xdf, 0x65, 0x49, 0x23, 0x8a, 0xca, 0xa5, 0x21, 0x1f, 0x44, 0x2a, + 0x07, 0x43, 0x1c, 0xb2, 0x05, 0x35, 0xc7, 0x7d, 0xd5, 0x67, 0xe2, 0x90, 0xdd, 0xf1, 0x85, 0xe1, + 0x45, 0x2e, 0xc4, 0x2a, 0xbc, 0xbc, 0xf2, 0x05, 0xab, 0x8e, 0x14, 0xf6, 0xd7, 0x8b, 0x70, 0x3a, + 0x43, 0x0e, 0xe4, 0x05, 0x38, 0x29, 0xc3, 0xd9, 0xa2, 0xeb, 0x89, 0x0b, 0xd1, 0xf5, 0xc4, 0xad, + 0x54, 0x1e, 0xf6, 0x51, 0x93, 0x57, 0x01, 0x74, 0xc3, 0xa0, 0xbe, 0xbf, 0xe2, 0x9a, 0xca, 0x1e, + 0x7d, 0x7e, 0x7f, 0x6f, 0x12, 0x66, 0xc3, 0xd4, 0xdb, 0x7b, 0x93, 0x1f, 0xc9, 0x8a, 0x50, 0x4d, + 0xc9, 0x39, 0x2a, 0x80, 0x31, 0x48, 0xf2, 0x39, 0x00, 0xb1, 0xf4, 0x0a, 0x0f, 0xd1, 0xdf, 0xc5, + 0x5f, 0x31, 0xa5, 0xae, 0x2b, 0x9a, 0xfa, 0x54, 0x4f, 0x77, 0x02, 0x2b, 0xd8, 0x15, 0x77, 0x96, + 0xdc, 0x08, 0x51, 0x30, 0x86, 0xa8, 0xfd, 0x71, 0x11, 0x6a, 0xca, 0x53, 0x7a, 0x0f, 0xdc, 0x63, + 0xed, 0x84, 0x7b, 0xec, 0x88, 0x62, 0x52, 0xb3, 0x9c, 0x63, 0x6e, 0xca, 0x39, 0xb6, 0x98, 0x9f, + 0xd5, 0x9d, 0x5d, 0x63, 0xdf, 0x2e, 0xc2, 0xb8, 0x22, 0xcd, 0xeb, 0x18, 0xfb, 0x04, 0x9c, 0x10, + 0x7b, 0xc9, 0x2b, 0xfa, 0x2d, 0x71, 0x7d, 0x0b, 0x17, 0x58, 0x59, 0x84, 0x81, 0x36, 0x93, 0x59, + 0x98, 0xa6, 0x65, 0xdd, 0x5a, 0x24, 0xad, 0xb3, 0x75, 0x84, 0xd8, 0x7d, 0x12, 0xeb, 0x1d, 0xde, + 0xad, 0x9b, 0xa9, 0x3c, 0xec, 0xa3, 0x4e, 0x7b, 0xe6, 0xca, 0xc7, 0xe0, 0x99, 0xfb, 0xeb, 0x02, + 0x8c, 0x46, 0xf2, 0x3a, 0x76, 0xbf, 0xdc, 0x66, 0xd2, 0x2f, 0x37, 0x9b, 0xbb, 0x3b, 0x0c, 0xf0, + 0xca, 0xfd, 0x42, 0x15, 0x12, 0xa1, 0xd1, 0x64, 0x03, 0xce, 0x59, 0x99, 0x01, 0x5e, 0xb1, 0xd9, + 0x26, 0x3c, 0xeb, 0xbb, 0x34, 0x90, 0x12, 0xef, 0x80, 0x42, 0x7a, 0x50, 0xdb, 0xa1, 0x5e, 0x60, + 0x19, 0x54, 0x7d, 0xdf, 0x62, 0x6e, 0x93, 0x4c, 0xfa, 0x1e, 0x43, 0x99, 0xde, 0x90, 0x0c, 0x30, + 0x64, 0x45, 0x36, 0xa0, 0x42, 0xcd, 0x36, 0x55, 0x17, 0xea, 0xe4, 0xbc, 0xae, 0x32, 0x94, 0x27, + 0x7b, 0xf3, 0x51, 0x40, 0x13, 0x1f, 0xea, 0xb6, 0xda, 0x5b, 0x92, 0xfd, 0x70, 
0x78, 0x03, 0x2b, + 0xdc, 0xa5, 0x8a, 0xce, 0xda, 0x87, 0x49, 0x18, 0xf1, 0x21, 0xdb, 0xa1, 0x93, 0xab, 0x72, 0x44, + 0x93, 0xc7, 0x1d, 0x5c, 0x5c, 0x3e, 0xd4, 0x6f, 0xea, 0x01, 0xf5, 0x3a, 0xba, 0xb7, 0x2d, 0x57, + 0x1b, 0xc3, 0x7f, 0xe1, 0x4b, 0x0a, 0x29, 0xfa, 0xc2, 0x30, 0x09, 0x23, 0x3e, 0xc4, 0x85, 0x7a, + 0x20, 0xcd, 0x67, 0xe5, 0xc9, 0x1b, 0x9e, 0xa9, 0x32, 0xc4, 0x7d, 0x19, 0x22, 0xad, 0x5e, 0x31, + 0xe2, 0x41, 0x76, 0x12, 0x57, 0xf9, 0x8a, 0x0b, 0x9c, 0x9b, 0x39, 0x3c, 0xc2, 0x12, 0x2a, 0x52, + 0x37, 0xd9, 0x57, 0x02, 0x6b, 0x6f, 0x57, 0xa2, 0x69, 0xf9, 0x5e, 0xfb, 0xa9, 0x9e, 0x4a, 0xfa, + 0xa9, 0x2e, 0xa4, 0xfd, 0x54, 0xa9, 0x2d, 0xca, 0xc3, 0x07, 0x55, 0xa6, 0x3c, 0x44, 0xe5, 0x63, + 0xf0, 0x10, 0x3d, 0x09, 0x8d, 0x1d, 0x3e, 0x13, 0x88, 0xdb, 0x79, 0x2a, 0x5c, 0x8d, 0xf0, 0x99, + 0xfd, 0x46, 0x94, 0x8c, 0x71, 0x1a, 0x56, 0x44, 0x58, 0x20, 0xd1, 0xf5, 0xa6, 0xb2, 0x48, 0x2b, + 0x4a, 0xc6, 0x38, 0x0d, 0x8f, 0xc7, 0xb2, 0x9c, 0x6d, 0x51, 0xa0, 0xca, 0x0b, 0x88, 0x78, 0x2c, + 0x95, 0x88, 0x51, 0x3e, 0xb9, 0x04, 0xb5, 0x9e, 0xb9, 0x29, 0x68, 0x6b, 0x9c, 0x96, 0x5b, 0x98, + 0xeb, 0xf3, 0x0b, 0xf2, 0xb6, 0x20, 0x95, 0xcb, 0x6a, 0xd2, 0xd1, 0xbb, 0x2a, 0x83, 0xaf, 0x0d, + 0x65, 0x4d, 0x56, 0xa2, 0x64, 0x8c, 0xd3, 0x90, 0x8f, 0xc1, 0xb8, 0x47, 0xcd, 0x9e, 0x41, 0xc3, + 0x52, 0xc0, 0x4b, 0x71, 0xaf, 0x28, 0x26, 0x72, 0x30, 0x45, 0x39, 0xc0, 0xcf, 0xd5, 0x18, 0xca, + 0xcf, 0xf5, 0xfd, 0x02, 0x90, 0xfe, 0xf8, 0x65, 0xb2, 0x05, 0x23, 0x0e, 0xf7, 0x7e, 0xe5, 0xbe, + 0x10, 0x39, 0xe6, 0x44, 0x13, 0xd3, 0x92, 0x4c, 0x90, 0xf8, 0xc4, 0x81, 0x1a, 0xbd, 0x15, 0x50, + 0xcf, 0x09, 0xcf, 0x33, 0x1c, 0xcd, 0xe5, 0xcb, 0x62, 0x35, 0x20, 0x91, 0x31, 0xe4, 0xa1, 0xfd, + 0xb0, 0x08, 0x8d, 0x18, 0xdd, 0xdd, 0x16, 0x95, 0xfc, 0x48, 0xb5, 0x70, 0x3a, 0xad, 0x7b, 0xb6, + 0x1c, 0x61, 0xb1, 0x23, 0xd5, 0x32, 0x0b, 0x97, 0x31, 0x4e, 0x47, 0x66, 0x00, 0x3a, 0xba, 0x1f, + 0x50, 0x8f, 0x6b, 0xdf, 0xd4, 0x41, 0xe6, 0x95, 0x30, 0x07, 0x63, 0x54, 0xe4, 0xa2, 0xbc, 0x3e, + 0xbb, 0x9c, 0xbc, 
0x78, 0x6e, 0xc0, 0xdd, 0xd8, 0x95, 0x23, 0xb8, 0x1b, 0x9b, 0xb4, 0xe1, 0xa4, + 0xaa, 0xb5, 0xca, 0x3d, 0xdc, 0xb5, 0x64, 0x62, 0xfd, 0x92, 0x82, 0xc0, 0x3e, 0x50, 0xed, 0xed, + 0x02, 0x8c, 0x25, 0x5c, 0x1e, 0xe2, 0xca, 0x38, 0x15, 0x7d, 0x9f, 0xb8, 0x32, 0x2e, 0x16, 0x34, + 0xff, 0x04, 0x8c, 0x08, 0x01, 0xa5, 0x83, 0xea, 0x84, 0x08, 0x51, 0xe6, 0xb2, 0xb9, 0x4c, 0x3a, + 0x55, 0xd3, 0x73, 0x99, 0xf4, 0xba, 0xa2, 0xca, 0x17, 0xee, 0x76, 0x51, 0xbb, 0x7e, 0x77, 0xbb, + 0x48, 0xc7, 0x90, 0x42, 0xfb, 0x51, 0x09, 0x78, 0x08, 0x0a, 0x79, 0x16, 0xea, 0x1d, 0x6a, 0x6c, + 0xe9, 0x8e, 0xe5, 0xab, 0x2b, 0x23, 0xd9, 0xea, 0xb6, 0xbe, 0xa2, 0x12, 0x6f, 0x33, 0x80, 0xd9, + 0xd6, 0x32, 0x8f, 0xf2, 0x8e, 0x68, 0x89, 0x01, 0x23, 0x6d, 0xdf, 0xd7, 0xbb, 0x56, 0xee, 0x1d, + 0x50, 0x71, 0x45, 0x9f, 0x18, 0x44, 0xe2, 0x19, 0x25, 0x34, 0x31, 0xa0, 0xd2, 0xb5, 0x75, 0xcb, + 0xc9, 0xfd, 0x8f, 0x12, 0xf6, 0x05, 0xab, 0x0c, 0x49, 0xb8, 0x74, 0xf8, 0x23, 0x0a, 0x6c, 0xd2, + 0x83, 0x86, 0x6f, 0x78, 0x7a, 0xc7, 0xdf, 0xd2, 0x67, 0x9e, 0x7e, 0x26, 0xb7, 0x91, 0x14, 0xb1, + 0x12, 0x73, 0xf6, 0x1c, 0xce, 0xae, 0xb4, 0xae, 0xcc, 0xce, 0x3c, 0xfd, 0x0c, 0xc6, 0xf9, 0xc4, + 0xd9, 0x3e, 0xfd, 0xe4, 0x8c, 0xec, 0xf7, 0x47, 0xce, 0xf6, 0xe9, 0x27, 0x67, 0x30, 0xce, 0x47, + 0xfb, 0xf7, 0x02, 0xd4, 0x43, 0x5a, 0xb2, 0x0e, 0xc0, 0x46, 0xa0, 0xbc, 0x54, 0xef, 0x50, 0x17, + 0xdc, 0xf3, 0x55, 0xf1, 0x7a, 0x58, 0x18, 0x63, 0x40, 0x19, 0xb7, 0x0e, 0x16, 0x8f, 0xfa, 0xd6, + 0xc1, 0x69, 0xa8, 0x6f, 0xe9, 0x8e, 0xe9, 0x6f, 0xe9, 0xdb, 0x62, 0x22, 0x8a, 0xdd, 0xc3, 0x79, + 0x45, 0x65, 0x60, 0x44, 0xa3, 0xfd, 0x4b, 0x05, 0xc4, 0xb6, 0x25, 0x1b, 0x2a, 0xa6, 0xe5, 0x8b, + 0xb8, 0xd9, 0x02, 0x2f, 0x19, 0x0e, 0x95, 0x79, 0x99, 0x8e, 0x21, 0x05, 0x39, 0x0b, 0xa5, 0x8e, + 0xe5, 0xc8, 0x1d, 0x0f, 0xee, 0xf0, 0x5a, 0xb1, 0x1c, 0x64, 0x69, 0x3c, 0x4b, 0xbf, 0x25, 0x43, + 0x9e, 0x44, 0x96, 0x7e, 0x0b, 0x59, 0x1a, 0x5b, 0x82, 0xda, 0xae, 0xbb, 0xbd, 0xa1, 0x1b, 0xdb, + 0x2a, 0x32, 0xaa, 0xcc, 0x15, 0x21, 0x5f, 0x82, 0x2e, 
0x27, 0xb3, 0x30, 0x4d, 0x4b, 0x16, 0xe1, + 0x84, 0xe1, 0xba, 0xb6, 0xe9, 0xde, 0x74, 0x54, 0x71, 0x61, 0x3a, 0xf0, 0x9d, 0x84, 0x79, 0xda, + 0xf5, 0xa8, 0xc1, 0xec, 0x8b, 0xb9, 0x24, 0x11, 0xa6, 0x4b, 0x91, 0x75, 0x78, 0xf8, 0x0d, 0xea, + 0xb9, 0x72, 0xba, 0x68, 0xd9, 0x94, 0x76, 0x15, 0xa0, 0x30, 0x2c, 0x78, 0xa4, 0xd6, 0x67, 0xb2, + 0x49, 0x70, 0x50, 0x59, 0x1e, 0xf3, 0xa9, 0x7b, 0x6d, 0x1a, 0xac, 0x7a, 0xae, 0x41, 0x7d, 0xdf, + 0x72, 0xda, 0x0a, 0xb6, 0x1a, 0xc1, 0xae, 0x65, 0x93, 0xe0, 0xa0, 0xb2, 0xe4, 0x65, 0x98, 0x10, + 0x59, 0x42, 0x6b, 0xcf, 0xee, 0xe8, 0x96, 0xad, 0x6f, 0x58, 0xb6, 0xfa, 0x27, 0xd7, 0x98, 0xd8, + 0xa0, 0x58, 0x1b, 0x40, 0x83, 0x03, 0x4b, 0xf3, 0x3f, 0x69, 0xc9, 0xed, 0xa9, 0x55, 0xea, 0xf1, + 0x7e, 0x20, 0xed, 0x19, 0xf1, 0x27, 0xad, 0x54, 0x1e, 0xf6, 0x51, 0x13, 0x84, 0x33, 0x7c, 0xbb, + 0x7b, 0xbd, 0x9b, 0x12, 0xba, 0xb4, 0x70, 0xf8, 0x3e, 0x54, 0x2b, 0x93, 0x02, 0x07, 0x94, 0x64, + 0xdf, 0xcb, 0x73, 0xe6, 0xdd, 0x9b, 0x4e, 0x1a, 0xb5, 0x11, 0x7d, 0x6f, 0x6b, 0x00, 0x0d, 0x0e, + 0x2c, 0xad, 0xfd, 0x51, 0x11, 0xc6, 0x12, 0x27, 0x9f, 0xef, 0xbb, 0x13, 0xa6, 0xcc, 0x54, 0xec, + 0xf8, 0xed, 0xa5, 0xf9, 0x2b, 0x54, 0x37, 0xa9, 0x77, 0x95, 0xaa, 0x53, 0xea, 0x7c, 0xf4, 0xaf, + 0x24, 0x72, 0x30, 0x45, 0x49, 0x36, 0xa1, 0x22, 0x1c, 0x9f, 0x79, 0xff, 0x69, 0xa0, 0x64, 0xc4, + 0xbd, 0x9f, 0x5c, 0x37, 0x08, 0xdf, 0xa7, 0x80, 0xd7, 0x02, 0x18, 0x8d, 0x53, 0xb0, 0x11, 0x1f, + 0x59, 0x55, 0xd5, 0x84, 0x45, 0xb5, 0x04, 0xa5, 0x20, 0x18, 0xf6, 0xec, 0xaa, 0x70, 0xa4, 0xaf, + 0x2d, 0x23, 0xc3, 0xd0, 0x36, 0x59, 0xdb, 0xf9, 0xbe, 0xe5, 0x3a, 0xf2, 0x22, 0xe3, 0x75, 0xa8, + 0x06, 0xd2, 0x97, 0x34, 0xdc, 0xd9, 0x5b, 0xee, 0xd7, 0x55, 0x7e, 0x24, 0x85, 0xa5, 0xfd, 0x4d, + 0x11, 0xea, 0xe1, 0xba, 0xef, 0x00, 0x17, 0x04, 0xbb, 0x50, 0x0f, 0x03, 0x63, 0x72, 0xff, 0x9f, + 0x2c, 0x8a, 0xd7, 0xe0, 0x4b, 0x95, 0xf0, 0x15, 0x23, 0x1e, 0xf1, 0xa0, 0x9b, 0x52, 0x8e, 0xa0, + 0x9b, 0x2e, 0x54, 0x03, 0xcf, 0x6a, 0xb7, 0xa5, 0x11, 0x9a, 0x27, 0xea, 0x26, 0x14, 0xd7, 
0x9a, + 0x00, 0x94, 0x92, 0x15, 0x2f, 0xa8, 0xd8, 0x68, 0xaf, 0xc1, 0xc9, 0x34, 0x25, 0xb7, 0xd0, 0x8c, + 0x2d, 0x6a, 0xf6, 0x6c, 0x25, 0xe3, 0xc8, 0x42, 0x93, 0xe9, 0x18, 0x52, 0xb0, 0x55, 0x1a, 0x6b, + 0xa6, 0x37, 0x5c, 0x47, 0xad, 0x7f, 0xb9, 0xb1, 0xbb, 0x26, 0xd3, 0x30, 0xcc, 0xd5, 0xfe, 0xa9, + 0x04, 0x67, 0xa3, 0xd5, 0xfb, 0x8a, 0xee, 0xe8, 0xed, 0x03, 0xfc, 0x94, 0xea, 0xfd, 0xd3, 0x0c, + 0x87, 0xbd, 0xe5, 0xbd, 0x74, 0x1f, 0xdc, 0xf2, 0xfe, 0xe3, 0x02, 0xf0, 0x20, 0x3e, 0xf2, 0x45, + 0x18, 0xd5, 0x63, 0xff, 0x23, 0x94, 0xcd, 0x79, 0x39, 0x77, 0x73, 0xf2, 0x58, 0xc1, 0x30, 0x28, + 0x25, 0x9e, 0x8a, 0x09, 0x86, 0xc4, 0x85, 0xda, 0xa6, 0x6e, 0xdb, 0xcc, 0x68, 0xc9, 0xbd, 0x1b, + 0x91, 0x60, 0xce, 0xbb, 0xf9, 0x82, 0x84, 0xc6, 0x90, 0x89, 0xf6, 0x8f, 0x05, 0x18, 0x6b, 0xd9, + 0x96, 0x69, 0x39, 0xed, 0x63, 0xbc, 0xde, 0xfd, 0x3a, 0x54, 0x7c, 0xdb, 0x32, 0xe9, 0x90, 0xf3, + 0xb8, 0xd0, 0x20, 0x0c, 0x00, 0x05, 0x4e, 0xf2, 0xbe, 0xf8, 0xd2, 0x01, 0xee, 0x8b, 0xff, 0xe9, + 0x08, 0xc8, 0x40, 0x50, 0xd2, 0x83, 0x7a, 0x5b, 0x5d, 0x43, 0x2d, 0xbf, 0xf1, 0x4a, 0x8e, 0x2b, + 0xcc, 0x12, 0x17, 0x5a, 0x8b, 0x59, 0x37, 0x4c, 0xc4, 0x88, 0x13, 0xa1, 0xc9, 0x5f, 0x50, 0xce, + 0xe7, 0xfc, 0x05, 0xa5, 0x60, 0xd7, 0xff, 0x13, 0x4a, 0x1d, 0xca, 0x5b, 0x41, 0xd0, 0x95, 0xe3, + 0x6a, 0xf8, 0x48, 0xdf, 0xe8, 0x16, 0x0d, 0x61, 0x8d, 0xb0, 0x77, 0xe4, 0xd0, 0x8c, 0x85, 0xa3, + 0x87, 0x7f, 0x3e, 0x9a, 0xcb, 0xb5, 0x23, 0x1d, 0x67, 0xc1, 0xde, 0x91, 0x43, 0x93, 0xcf, 0x43, + 0x23, 0xf0, 0x74, 0xc7, 0xdf, 0x74, 0xbd, 0x0e, 0xf5, 0xe4, 0x32, 0x6e, 0x21, 0xc7, 0x5f, 0x18, + 0xd7, 0x22, 0x34, 0xb1, 0xd5, 0x95, 0x48, 0xc2, 0x38, 0x37, 0xb2, 0x0d, 0xb5, 0x9e, 0x29, 0x2a, + 0x26, 0xfd, 0x1b, 0xb3, 0x79, 0x7e, 0xac, 0x19, 0xdb, 0x6f, 0x56, 0x6f, 0x18, 0x32, 0x48, 0xfe, + 0xe4, 0xab, 0x7a, 0x54, 0x3f, 0xf9, 0x8a, 0xf7, 0xc6, 0xac, 0x23, 0xfe, 0xa4, 0x23, 0x2d, 0x4a, + 0xa7, 0x2d, 0xc3, 0x65, 0x16, 0x72, 0x1b, 0x7b, 0x82, 0x65, 0x23, 0xb4, 0x4a, 0x9d, 0x36, 0x2a, + 0x1e, 0x5a, 0x07, 0xa4, 0x1b, 
0x9a, 0x18, 0x89, 0x5f, 0x61, 0x88, 0x73, 0x27, 0xd3, 0x07, 0x9b, + 0x0f, 0xc2, 0x7f, 0x32, 0xc4, 0xae, 0xe2, 0xcd, 0xfc, 0xe7, 0x85, 0xf6, 0xb7, 0x45, 0x28, 0xad, + 0x2d, 0xb7, 0xc4, 0xf5, 0x7a, 0xfc, 0x3f, 0x33, 0xb4, 0xb5, 0x6d, 0x75, 0x6f, 0x50, 0xcf, 0xda, + 0xdc, 0x95, 0xab, 0xd3, 0xd8, 0xf5, 0x7a, 0x69, 0x0a, 0xcc, 0x28, 0x45, 0x5e, 0x81, 0x51, 0x43, + 0x9f, 0xa3, 0x5e, 0x30, 0xcc, 0xda, 0x9b, 0x1f, 0xb0, 0x9b, 0x9b, 0x8d, 0x8a, 0x63, 0x02, 0x8c, + 0xac, 0x03, 0x18, 0x11, 0x74, 0xe9, 0xd0, 0x1e, 0x83, 0x18, 0x70, 0x0c, 0x88, 0x20, 0xd4, 0xb7, + 0x19, 0x29, 0x47, 0x2d, 0x1f, 0x06, 0x95, 0xf7, 0x9c, 0xab, 0xaa, 0x2c, 0x46, 0x30, 0x9a, 0x03, + 0x63, 0x89, 0xff, 0x63, 0x90, 0x8f, 0x42, 0xcd, 0xed, 0xc6, 0xa6, 0xd3, 0x3a, 0x5f, 0x4e, 0xd7, + 0xae, 0xcb, 0xb4, 0xdb, 0x7b, 0x93, 0x63, 0xcb, 0x6e, 0xdb, 0x32, 0x54, 0x02, 0x86, 0xe4, 0x44, + 0x83, 0x11, 0x7e, 0x2a, 0x46, 0xfd, 0x1d, 0x83, 0xeb, 0x0e, 0x7e, 0x81, 0xbd, 0x8f, 0x32, 0x47, + 0xfb, 0x52, 0x19, 0xa2, 0xcd, 0x1b, 0xe2, 0xc3, 0x88, 0x88, 0xfa, 0x95, 0x33, 0xf7, 0xb1, 0x06, + 0x18, 0x4b, 0x56, 0xa4, 0x0d, 0xa5, 0xd7, 0xdc, 0x8d, 0xdc, 0x13, 0x77, 0xec, 0x38, 0xac, 0x70, + 0x27, 0xc5, 0x12, 0x90, 0x71, 0x20, 0xbf, 0x5a, 0x80, 0x53, 0x7e, 0xda, 0xe8, 0x94, 0xdd, 0x01, + 0xf3, 0x5b, 0xd7, 0x69, 0x33, 0x56, 0x46, 0x50, 0x0e, 0xca, 0xc6, 0xfe, 0xba, 0x30, 0xf9, 0x8b, + 0x5d, 0x15, 0xd9, 0x9d, 0x16, 0x73, 0xfe, 0xd3, 0x2d, 0x29, 0xff, 0x64, 0x1a, 0x4a, 0x56, 0xda, + 0x57, 0x8a, 0xd0, 0x88, 0xcd, 0xd6, 0xb9, 0x7f, 0xba, 0x72, 0x2b, 0xf5, 0xd3, 0x95, 0xd5, 0xe1, + 0x37, 0x19, 0xa3, 0x5a, 0x1d, 0xf7, 0x7f, 0x57, 0xfe, 0xb4, 0x08, 0xa5, 0xf5, 0xf9, 0x85, 0xe4, + 0x72, 0xb1, 0x70, 0x0f, 0x96, 0x8b, 0x5b, 0x50, 0xdd, 0xe8, 0x59, 0x76, 0x60, 0x39, 0xb9, 0x0f, + 0xec, 0xab, 0x7f, 0xd4, 0xc8, 0x73, 0xaf, 0x02, 0x15, 0x15, 0x3c, 0x69, 0x43, 0xb5, 0x2d, 0x6e, + 0x4c, 0xcb, 0x1d, 0x7a, 0x25, 0x6f, 0x5e, 0x13, 0x8c, 0xe4, 0x0b, 0x2a, 0x74, 0xed, 0x0b, 0x20, + 0xff, 0x72, 0x4d, 0xfc, 0xe3, 0x91, 0x66, 0x68, 0x8c, 0x66, 0x49, 
0x54, 0xfb, 0x3c, 0x84, 0x96, + 0xc0, 0x3d, 0x6f, 0x4e, 0xed, 0x5f, 0x0b, 0x90, 0x34, 0x7e, 0xee, 0x7d, 0x8f, 0xda, 0x4e, 0xf7, + 0xa8, 0xf9, 0xa3, 0x18, 0x80, 0xd9, 0x9d, 0x4a, 0xfb, 0x83, 0x22, 0x8c, 0xdc, 0xb3, 0x83, 0x96, + 0x34, 0x11, 0x49, 0x36, 0x97, 0x73, 0x72, 0x1c, 0x18, 0x47, 0xd6, 0x49, 0xc5, 0x91, 0xe5, 0xfd, + 0xb3, 0xe6, 0x5d, 0xa2, 0xc8, 0xfe, 0xb2, 0x00, 0x72, 0x6a, 0x5e, 0x72, 0xfc, 0x40, 0x77, 0x0c, + 0xfe, 0x83, 0x77, 0xa9, 0x07, 0xf2, 0x86, 0x2b, 0xc8, 0x90, 0x1e, 0xa1, 0xfa, 0xf9, 0xb3, 0x9a, + 0xf7, 0xc9, 0x87, 0xa1, 0xb6, 0xe5, 0xfa, 0x01, 0x9f, 0xeb, 0x8b, 0x49, 0xdf, 0xce, 0x15, 0x99, + 0x8e, 0x21, 0x45, 0x7a, 0x5b, 0xaf, 0x32, 0x78, 0x5b, 0x4f, 0xfb, 0x56, 0x11, 0x46, 0xdf, 0x2b, + 0xa7, 0x45, 0xb3, 0xe2, 0xee, 0x4a, 0x39, 0xe3, 0xee, 0xca, 0x87, 0x89, 0xbb, 0xd3, 0xbe, 0x5b, + 0x00, 0xb8, 0x67, 0x47, 0x55, 0xcd, 0x64, 0x48, 0x5c, 0xee, 0x7e, 0x95, 0x1d, 0x10, 0xf7, 0xbb, + 0x15, 0xf5, 0x49, 0x3c, 0x1c, 0xee, 0xcd, 0x02, 0x8c, 0xeb, 0x89, 0x10, 0xb3, 0xdc, 0xe6, 0x65, + 0x2a, 0x62, 0x2d, 0x3c, 0x96, 0x97, 0x4c, 0xc7, 0x14, 0x5b, 0xf2, 0x5c, 0x74, 0x4d, 0xea, 0xb5, + 0xa8, 0xdb, 0xf7, 0xdd, 0x6f, 0xca, 0x4d, 0x9d, 0x04, 0xe5, 0x5d, 0x42, 0xfa, 0x4a, 0x47, 0x12, + 0xd2, 0x17, 0x3f, 0xac, 0x54, 0xbe, 0xe3, 0x61, 0xa5, 0x1d, 0xa8, 0x6f, 0x7a, 0x6e, 0x87, 0x47, + 0xcd, 0xc9, 0x7f, 0x72, 0x5e, 0xce, 0xa1, 0x53, 0xa2, 0xbf, 0x51, 0x47, 0xaa, 0x75, 0x41, 0xe1, + 0x63, 0xc4, 0x8a, 0x3b, 0xa5, 0x5d, 0xc1, 0x75, 0xe4, 0x28, 0xb9, 0x86, 0x73, 0xc9, 0x9a, 0x40, + 0x47, 0xc5, 0x26, 0x19, 0x29, 0x57, 0xbd, 0x37, 0x91, 0x72, 0xda, 0xcf, 0x97, 0xd5, 0x04, 0x76, + 0xdf, 0xdd, 0xc8, 0xf7, 0xde, 0x3f, 0xe2, 0x98, 0x3e, 0x7f, 0x58, 0xbd, 0x87, 0xe7, 0x0f, 0x6b, + 0x43, 0xc5, 0x65, 0xed, 0x95, 0x20, 0xb5, 0x76, 0x7a, 0x7f, 0x87, 0xe2, 0x3f, 0xd5, 0x0e, 0xc5, + 0x5b, 0x45, 0x88, 0x26, 0x82, 0x43, 0x86, 0x5a, 0xbc, 0x0c, 0xb5, 0x8e, 0x7e, 0x6b, 0x9e, 0xda, + 0xfa, 0x6e, 0x9e, 0x1f, 0x29, 0xae, 0x48, 0x0c, 0x0c, 0xd1, 0x88, 0x0f, 0x60, 0x85, 0x97, 0x19, + 0xe7, 
0xf6, 0x38, 0x47, 0xf7, 0x22, 0x0b, 0x9f, 0x56, 0xf4, 0x8e, 0x31, 0x36, 0xda, 0x5f, 0x14, + 0x41, 0xde, 0x7a, 0x4d, 0x28, 0x54, 0x36, 0xad, 0x5b, 0xd4, 0xcc, 0x1d, 0x76, 0x18, 0xfb, 0xbd, + 0xad, 0x70, 0xa9, 0xf3, 0x04, 0x14, 0xe8, 0xdc, 0x57, 0x2a, 0xb6, 0x48, 0xa4, 0xfc, 0x72, 0xf8, + 0x4a, 0xe3, 0x5b, 0x2d, 0xd2, 0x57, 0x2a, 0x92, 0x50, 0xf1, 0x10, 0xae, 0x59, 0xbe, 0x4f, 0x2d, + 0x45, 0x9a, 0xc7, 0x35, 0x1b, 0xdb, 0xef, 0x56, 0xae, 0x59, 0x5f, 0x1c, 0x40, 0x96, 0x3c, 0x9a, + 0x9f, 0xfd, 0xce, 0xf7, 0x2e, 0x3c, 0xf0, 0xdd, 0xef, 0x5d, 0x78, 0xe0, 0x9d, 0xef, 0x5d, 0x78, + 0xe0, 0x4b, 0xfb, 0x17, 0x0a, 0xdf, 0xd9, 0xbf, 0x50, 0xf8, 0xee, 0xfe, 0x85, 0xc2, 0x3b, 0xfb, + 0x17, 0x0a, 0x7f, 0xbf, 0x7f, 0xa1, 0xf0, 0x4b, 0xff, 0x70, 0xe1, 0x81, 0xcf, 0x3c, 0x1b, 0x55, + 0x61, 0x5a, 0x55, 0x61, 0x5a, 0x31, 0x9c, 0xee, 0x6e, 0xb7, 0xa7, 0x59, 0x15, 0xa2, 0x14, 0x55, + 0x85, 0xff, 0x08, 0x00, 0x00, 0xff, 0xff, 0xfe, 0x6f, 0x91, 0x67, 0x6c, 0x92, 0x00, 0x00, } func (m *AbstractPodTemplate) Marshal() (dAtA []byte, err error) { @@ -4448,7 +4664,7 @@ func (m *GetJetStreamStatefulSetSpecReq) MarshalToSizedBuffer(dAtA []byte) (int, return len(dAtA) - i, nil } -func (m *GetRedisServiceSpecReq) Marshal() (dAtA []byte, err error) { +func (m *GetMonoVertexDaemonDeploymentReq) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4458,22 +4674,136 @@ func (m *GetRedisServiceSpecReq) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetRedisServiceSpecReq) MarshalTo(dAtA []byte) (int, error) { +func (m *GetMonoVertexDaemonDeploymentReq) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GetRedisServiceSpecReq) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *GetMonoVertexDaemonDeploymentReq) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, 
uint64(m.SentinelContainerPort)) - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.RedisContainerPort)) + { + size, err := m.DefaultResources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x10 + dAtA[i] = 0x22 + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.PullPolicy) + copy(dAtA[i:], m.PullPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PullPolicy))) + i-- + dAtA[i] = 0x12 + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GetMonoVertexPodSpecReq) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetMonoVertexPodSpecReq) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetMonoVertexPodSpecReq) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.DefaultResources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.PullPolicy) + copy(dAtA[i:], m.PullPolicy) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.PullPolicy))) + i-- + dAtA[i] = 0x12 + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GetRedisServiceSpecReq) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetRedisServiceSpecReq) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetRedisServiceSpecReq) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.SentinelContainerPort)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.RedisContainerPort)) + i-- + dAtA[i] = 0x10 if len(m.Labels) > 0 { keysForLabels := make([]string, 0, len(m.Labels)) for k := range m.Labels { @@ -5710,7 +6040,7 @@ func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *NativeRedis) Marshal() (dAtA []byte, err error) { +func (m *MonoVertex) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5720,30 +6050,18 @@ func (m *NativeRedis) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *NativeRedis) MarshalTo(dAtA []byte) (int, error) { +func (m *MonoVertex) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NativeRedis) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MonoVertex) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Settings != nil { - { - size, err := m.Settings.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 
0x4a - } { - size, err := m.AbstractPodTemplate.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5751,81 +6069,31 @@ func (m *NativeRedis) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x42 - if m.Persistence != nil { - { - size, err := m.Persistence.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - if m.InitContainerTemplate != nil { - { - size, err := m.InitContainerTemplate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if m.MetricsContainerTemplate != nil { - { - size, err := m.MetricsContainerTemplate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.SentinelContainerTemplate != nil { - { - size, err := m.SentinelContainerTemplate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x22 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.RedisContainerTemplate != nil { - { - size, err := m.RedisContainerTemplate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a - } - if m.Replicas != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - i-- - dAtA[i] = 0x10 + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) } - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *NatsAuth) Marshal() (dAtA []byte, err error) { +func (m *MonoVertexLimits) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5835,31 +6103,19 @@ func (m *NatsAuth) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *NatsAuth) MarshalTo(dAtA []byte) (int, error) { +func (m *MonoVertexLimits) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NatsAuth) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MonoVertexLimits) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.NKey != nil { - { - size, err := m.NKey.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.Token != nil { + if m.ReadTimeout != nil { { - size, err := m.Token.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ReadTimeout.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5869,22 +6125,15 @@ func (m *NatsAuth) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - if m.Basic != nil { - { - size, err := m.Basic.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + if m.ReadBatchSize != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ReadBatchSize)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func (m *NatsSource) Marshal() (dAtA []byte, err error) { +func (m *MonoVertexList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5894,82 +6143,44 @@ func (m *NatsSource) Marshal() (dAtA 
[]byte, err error) { return dAtA[:n], nil } -func (m *NatsSource) MarshalTo(dAtA []byte) (int, error) { +func (m *MonoVertexList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NatsSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MonoVertexList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Auth != nil { - { - size, err := m.Auth.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 } - i-- - dAtA[i] = 0x2a } - if m.TLS != nil { - { - size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x22 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= len(m.Queue) - copy(dAtA[i:], m.Queue) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Queue))) - i-- - dAtA[i] = 0x1a - i -= len(m.Subject) - copy(dAtA[i:], m.Subject) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subject))) - i-- - dAtA[i] = 0x12 - i -= len(m.URL) - copy(dAtA[i:], m.URL) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *NoStore) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NoStore) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - 
-func (m *NoStore) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *PBQStorage) Marshal() (dAtA []byte, err error) { +func (m *MonoVertexSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5979,19 +6190,19 @@ func (m *PBQStorage) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PBQStorage) MarshalTo(dAtA []byte) (int, error) { +func (m *MonoVertexSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PBQStorage) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MonoVertexSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.NoStore != nil { + if m.DaemonTemplate != nil { { - size, err := m.NoStore.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.DaemonTemplate.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5999,11 +6210,49 @@ func (m *PBQStorage) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x5a } - if m.EmptyDir != nil { + if len(m.Sidecars) > 0 { + for iNdEx := len(m.Sidecars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Sidecars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + } + if len(m.InitContainers) > 0 { + for iNdEx := len(m.InitContainers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.InitContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + { + size, err := m.Scale.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x42 + if m.Limits != nil { { - size, err := m.EmptyDir.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Limits.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6011,11 +6260,25 @@ func (m *PBQStorage) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x3a } - if m.PersistentVolumeClaim != nil { + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.ContainerTemplate != nil { { - size, err := m.PersistentVolumeClaim.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ContainerTemplate.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6023,34 +6286,21 @@ func (m *PBQStorage) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x2a } - return len(dAtA) - i, nil -} - -func (m *PersistenceStrategy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + { + size, err := m.AbstractPodTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return dAtA[:n], nil -} - -func (m *PersistenceStrategy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PersistenceStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.VolumeSize != nil { + i-- + dAtA[i] = 0x22 + if m.Sink != nil { { - size, err := m.VolumeSize.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Sink.MarshalToSizedBuffer(dAtA[:i]) if err != 
nil { return 0, err } @@ -6060,24 +6310,27 @@ func (m *PersistenceStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x1a } - if m.AccessMode != nil { - i -= len(*m.AccessMode) - copy(dAtA[i:], *m.AccessMode) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AccessMode))) + if m.Source != nil { + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0x12 } - if m.StorageClassName != nil { - i -= len(*m.StorageClassName) - copy(dAtA[i:], *m.StorageClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StorageClassName))) + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func (m *Pipeline) Marshal() (dAtA []byte, err error) { +func (m *MonoVertexStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6087,18 +6340,21 @@ func (m *Pipeline) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Pipeline) MarshalTo(dAtA []byte) (int, error) { +func (m *MonoVertexStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Pipeline) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MonoVertexStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x48 { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.LastScaledAt.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6106,9 +6362,9 @@ func (m *Pipeline) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x42 { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + size, err := 
m.LastUpdated.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6116,9 +6372,32 @@ func (m *Pipeline) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- + dAtA[i] = 0x3a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + i -= len(m.Selector) + copy(dAtA[i:], m.Selector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Selector))) + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x18 + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- dAtA[i] = 0x12 { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6130,7 +6409,7 @@ func (m *Pipeline) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PipelineLimits) Marshal() (dAtA []byte, err error) { +func (m *NativeRedis) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6140,19 +6419,19 @@ func (m *PipelineLimits) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PipelineLimits) MarshalTo(dAtA []byte) (int, error) { +func (m *NativeRedis) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PipelineLimits) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *NativeRedis) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.ReadTimeout != nil { + if m.Settings != nil { { - size, err := m.ReadTimeout.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Settings.MarshalToSizedBuffer(dAtA[:i]) if err != nil { 
return 0, err } @@ -6160,27 +6439,92 @@ func (m *PipelineLimits) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x22 + dAtA[i] = 0x4a } - if m.BufferUsageLimit != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.BufferUsageLimit)) - i-- - dAtA[i] = 0x18 + { + size, err := m.AbstractPodTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.BufferMaxLength != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.BufferMaxLength)) + i-- + dAtA[i] = 0x42 + if m.Persistence != nil { + { + size, err := m.Persistence.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x10 + dAtA[i] = 0x3a } - if m.ReadBatchSize != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.ReadBatchSize)) + if m.InitContainerTemplate != nil { + { + size, err := m.InitContainerTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x8 + dAtA[i] = 0x32 + } + if m.MetricsContainerTemplate != nil { + { + size, err := m.MetricsContainerTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.SentinelContainerTemplate != nil { + { + size, err := m.SentinelContainerTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.RedisContainerTemplate != nil { + { + size, err := m.RedisContainerTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Replicas != nil { + i = 
encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x10 } + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *PipelineList) Marshal() (dAtA []byte, err error) { +func (m *NatsAuth) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6190,44 +6534,56 @@ func (m *PipelineList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PipelineList) MarshalTo(dAtA []byte) (int, error) { +func (m *NatsAuth) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PipelineList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *NatsAuth) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.NKey != nil { + { + size, err := m.NKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.Token != nil { + { + size, err := m.Token.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Basic != nil { + { + size, err := m.Basic.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i-- - dAtA[i] = 0xa return 
len(dAtA) - i, nil } -func (m *PipelineSpec) Marshal() (dAtA []byte, err error) { +func (m *NatsSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6237,33 +6593,19 @@ func (m *PipelineSpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PipelineSpec) MarshalTo(dAtA []byte) (int, error) { +func (m *NatsSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PipelineSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *NatsSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.SideInputs) > 0 { - for iNdEx := len(m.SideInputs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.SideInputs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - } - if m.Templates != nil { + if m.Auth != nil { { - size, err := m.Templates.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Auth.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6271,21 +6613,11 @@ func (m *PipelineSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x3a - } - { - size, err := m.Watermark.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0x2a } - i-- - dAtA[i] = 0x32 - if m.Limits != nil { + if m.TLS != nil { { - size, err := m.Limits.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6293,55 +6625,27 @@ func (m *PipelineSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x2a - } - { - size, err := m.Lifecycle.MarshalToSizedBuffer(dAtA[:i]) - if err != nil 
{ - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0x22 } + i -= len(m.Queue) + copy(dAtA[i:], m.Queue) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Queue))) i-- - dAtA[i] = 0x22 - if len(m.Edges) > 0 { - for iNdEx := len(m.Edges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Edges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Vertices) > 0 { - for iNdEx := len(m.Vertices) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Vertices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.InterStepBufferServiceName) - copy(dAtA[i:], m.InterStepBufferServiceName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.InterStepBufferServiceName))) + dAtA[i] = 0x1a + i -= len(m.Subject) + copy(dAtA[i:], m.Subject) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subject))) + i-- + dAtA[i] = 0x12 + i -= len(m.URL) + copy(dAtA[i:], m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *PipelineStatus) Marshal() (dAtA []byte, err error) { +func (m *NoStore) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6351,105 +6655,54 @@ func (m *PipelineStatus) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PipelineStatus) MarshalTo(dAtA []byte) (int, error) { +func (m *NoStore) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PipelineStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *NoStore) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, 
uint64(m.ObservedGeneration)) - i-- - dAtA[i] = 0x58 - if m.ReduceUDFCount != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.ReduceUDFCount)) + return len(dAtA) - i, nil +} + +func (m *PBQStorage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PBQStorage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PBQStorage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NoStore != nil { + { + size, err := m.NoStore.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x50 + dAtA[i] = 0x1a } - if m.MapUDFCount != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.MapUDFCount)) - i-- - dAtA[i] = 0x48 - } - if m.UDFCount != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.UDFCount)) - i-- - dAtA[i] = 0x40 - } - if m.SinkCount != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.SinkCount)) - i-- - dAtA[i] = 0x38 - } - if m.SourceCount != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.SourceCount)) - i-- - dAtA[i] = 0x30 - } - if m.VertexCount != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.VertexCount)) - i-- - dAtA[i] = 0x28 - } - { - size, err := m.LastUpdated.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x1a - i -= len(m.Phase) - copy(dAtA[i:], m.Phase) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i-- - dAtA[i] = 0x12 - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - 
i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *RedisBufferService) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RedisBufferService) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RedisBufferService) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.External != nil { + if m.EmptyDir != nil { { - size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.EmptyDir.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6459,9 +6712,9 @@ func (m *RedisBufferService) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - if m.Native != nil { + if m.PersistentVolumeClaim != nil { { - size, err := m.Native.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.PersistentVolumeClaim.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6474,7 +6727,7 @@ func (m *RedisBufferService) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RedisConfig) Marshal() (dAtA []byte, err error) { +func (m *PersistenceStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6484,19 +6737,19 @@ func (m *RedisConfig) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RedisConfig) MarshalTo(dAtA []byte) (int, error) { +func (m *PersistenceStrategy) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RedisConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PersistenceStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if 
m.SentinelPassword != nil { + if m.VolumeSize != nil { { - size, err := m.SentinelPassword.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.VolumeSize.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6504,44 +6757,26 @@ func (m *RedisConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x32 + dAtA[i] = 0x1a } - if m.Password != nil { - { - size, err := m.Password.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + if m.AccessMode != nil { + i -= len(*m.AccessMode) + copy(dAtA[i:], *m.AccessMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AccessMode))) i-- - dAtA[i] = 0x2a + dAtA[i] = 0x12 + } + if m.StorageClassName != nil { + i -= len(*m.StorageClassName) + copy(dAtA[i:], *m.StorageClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StorageClassName))) + i-- + dAtA[i] = 0xa } - i -= len(m.User) - copy(dAtA[i:], m.User) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i-- - dAtA[i] = 0x22 - i -= len(m.MasterName) - copy(dAtA[i:], m.MasterName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MasterName))) - i-- - dAtA[i] = 0x1a - i -= len(m.SentinelURL) - copy(dAtA[i:], m.SentinelURL) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SentinelURL))) - i-- - dAtA[i] = 0x12 - i -= len(m.URL) - copy(dAtA[i:], m.URL) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) - i-- - dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *RedisSettings) Marshal() (dAtA []byte, err error) { +func (m *Pipeline) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6551,40 +6786,50 @@ func (m *RedisSettings) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RedisSettings) MarshalTo(dAtA []byte) (int, error) { +func (m *Pipeline) MarshalTo(dAtA []byte) (int, error) { size := m.Size() 
return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RedisSettings) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Pipeline) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.Sentinel) - copy(dAtA[i:], m.Sentinel) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Sentinel))) - i-- - dAtA[i] = 0x22 - i -= len(m.Replica) - copy(dAtA[i:], m.Replica) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Replica))) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0x1a - i -= len(m.Master) - copy(dAtA[i:], m.Master) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Master))) + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0x12 - i -= len(m.Redis) - copy(dAtA[i:], m.Redis) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Redis))) + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *SASL) Marshal() (dAtA []byte, err error) { +func (m *PipelineLimits) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6594,31 +6839,19 @@ func (m *SASL) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SASL) MarshalTo(dAtA []byte) (int, error) { +func (m *PipelineLimits) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SASL) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PipelineLimits) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.SCRAMSHA512 != nil { - { - size, err := 
m.SCRAMSHA512.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.SCRAMSHA256 != nil { + if m.ReadTimeout != nil { { - size, err := m.SCRAMSHA256.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ReadTimeout.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6628,41 +6861,25 @@ func (m *SASL) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x22 } - if m.Plain != nil { - { - size, err := m.Plain.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + if m.BufferUsageLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.BufferUsageLimit)) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x18 } - if m.GSSAPI != nil { - { - size, err := m.GSSAPI.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + if m.BufferMaxLength != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.BufferMaxLength)) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x10 } - if m.Mechanism != nil { - i -= len(*m.Mechanism) - copy(dAtA[i:], *m.Mechanism) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Mechanism))) + if m.ReadBatchSize != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ReadBatchSize)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func (m *SASLPlain) Marshal() (dAtA []byte, err error) { +func (m *PipelineList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6672,27 +6889,80 @@ func (m *SASLPlain) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SASLPlain) MarshalTo(dAtA []byte) (int, error) { +func (m *PipelineList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SASLPlain) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { +func (m *PipelineList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i-- - if m.Handshake { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - if m.PasswordSecret != nil { - { - size, err := m.PasswordSecret.MarshalToSizedBuffer(dAtA[:i]) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PipelineSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PipelineSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PipelineSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.SideInputs) > 0 { + for iNdEx := len(m.SideInputs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.SideInputs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.Templates != nil { + { + size, err := m.Templates.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6700,11 +6970,21 @@ func (m *SASLPlain) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x3a } - if m.UserSecret != nil { + { + size, err := m.Watermark.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + if m.Limits != nil { { - size, err := m.UserSecret.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Limits.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6712,12 +6992,55 @@ func (m *SASLPlain) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x2a + } + { + size, err := m.Lifecycle.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.Edges) > 0 { + for iNdEx := len(m.Edges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Edges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Vertices) > 0 { + for iNdEx := len(m.Vertices) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Vertices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } + i -= len(m.InterStepBufferServiceName) + copy(dAtA[i:], m.InterStepBufferServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.InterStepBufferServiceName))) + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *Scale) Marshal() (dAtA []byte, err error) { +func (m *PipelineStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6727,78 +7050,83 @@ func (m *Scale) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Scale) MarshalTo(dAtA []byte) (int, error) { +func (m *PipelineStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func 
(m *PipelineStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.ScaleDownCooldownSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.ScaleDownCooldownSeconds)) - i-- - dAtA[i] = 0x58 - } - if m.ScaleUpCooldownSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.ScaleUpCooldownSeconds)) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x58 + if m.ReduceUDFCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ReduceUDFCount)) i-- dAtA[i] = 0x50 } - if m.ReplicasPerScale != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.ReplicasPerScale)) + if m.MapUDFCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MapUDFCount)) i-- dAtA[i] = 0x48 } - if m.TargetBufferAvailability != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetBufferAvailability)) + if m.UDFCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.UDFCount)) i-- dAtA[i] = 0x40 } - if m.TargetProcessingSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetProcessingSeconds)) + if m.SinkCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SinkCount)) i-- dAtA[i] = 0x38 } - if m.ZeroReplicaSleepSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.ZeroReplicaSleepSeconds)) + if m.SourceCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SourceCount)) i-- dAtA[i] = 0x30 } - if m.DeprecatedCooldownSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.DeprecatedCooldownSeconds)) + if m.VertexCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.VertexCount)) i-- dAtA[i] = 0x28 } - if m.LookbackSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.LookbackSeconds)) - i-- - dAtA[i] = 0x20 - } - if m.Max != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Max)) - i-- - dAtA[i] = 0x18 - } - if m.Min != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Min)) - i-- - dAtA[i] = 0x10 + { + size, err 
:= m.LastUpdated.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - if m.Disabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = 0x22 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x1a + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ServingSource) Marshal() (dAtA []byte, err error) { +func (m *RedisBufferService) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6808,19 +7136,19 @@ func (m *ServingSource) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ServingSource) MarshalTo(dAtA []byte) (int, error) { +func (m *RedisBufferService) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ServingSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RedisBufferService) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Store != nil { + if m.External != nil { { - size, err := m.Store.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6828,26 +7156,11 @@ func (m *ServingSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x22 - } - if m.MsgIDHeaderKey != nil { - i -= len(*m.MsgIDHeaderKey) - copy(dAtA[i:], *m.MsgIDHeaderKey) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MsgIDHeaderKey))) - i-- - dAtA[i] = 0x1a - } - i-- - if 
m.Service { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = 0x12 } - i-- - dAtA[i] = 0x10 - if m.Auth != nil { + if m.Native != nil { { - size, err := m.Auth.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Native.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6860,7 +7173,7 @@ func (m *ServingSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ServingStore) Marshal() (dAtA []byte, err error) { +func (m *RedisConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6870,19 +7183,19 @@ func (m *ServingStore) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ServingStore) MarshalTo(dAtA []byte) (int, error) { +func (m *RedisConfig) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ServingStore) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RedisConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.TTL != nil { + if m.SentinelPassword != nil { { - size, err := m.TTL.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.SentinelPassword.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6890,102 +7203,11 @@ func (m *ServingStore) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x32 } - if m.URL != nil { - i -= len(*m.URL) - copy(dAtA[i:], *m.URL) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SessionWindow) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SessionWindow) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SessionWindow) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Timeout != nil { - { - size, err := m.Timeout.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SideInput) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SideInput) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SideInput) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Trigger != nil { - { - size, err := m.Trigger.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if len(m.Volumes) > 0 { - for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.Container != nil { + if m.Password != nil { { - size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Password.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6993,17 +7215,32 @@ func (m *SideInput) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x2a } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x22 + i -= 
len(m.MasterName) + copy(dAtA[i:], m.MasterName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MasterName))) + i-- + dAtA[i] = 0x1a + i -= len(m.SentinelURL) + copy(dAtA[i:], m.SentinelURL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SentinelURL))) + i-- + dAtA[i] = 0x12 + i -= len(m.URL) + copy(dAtA[i:], m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *SideInputTrigger) Marshal() (dAtA []byte, err error) { +func (m *RedisSettings) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7013,32 +7250,40 @@ func (m *SideInputTrigger) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SideInputTrigger) MarshalTo(dAtA []byte) (int, error) { +func (m *RedisSettings) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SideInputTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RedisSettings) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Timezone != nil { - i -= len(*m.Timezone) - copy(dAtA[i:], *m.Timezone) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Timezone))) - i-- - dAtA[i] = 0x12 - } - i -= len(m.Schedule) - copy(dAtA[i:], m.Schedule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) + i -= len(m.Sentinel) + copy(dAtA[i:], m.Sentinel) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Sentinel))) + i-- + dAtA[i] = 0x22 + i -= len(m.Replica) + copy(dAtA[i:], m.Replica) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Replica))) + i-- + dAtA[i] = 0x1a + i -= len(m.Master) + copy(dAtA[i:], m.Master) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Master))) + i-- + dAtA[i] = 0x12 + i -= len(m.Redis) + copy(dAtA[i:], m.Redis) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Redis))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m 
*SideInputsManagerTemplate) Marshal() (dAtA []byte, err error) { +func (m *SASL) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7048,19 +7293,19 @@ func (m *SideInputsManagerTemplate) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SideInputsManagerTemplate) MarshalTo(dAtA []byte) (int, error) { +func (m *SASL) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SideInputsManagerTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *SASL) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.InitContainerTemplate != nil { + if m.SCRAMSHA512 != nil { { - size, err := m.InitContainerTemplate.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.SCRAMSHA512.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -7068,11 +7313,11 @@ func (m *SideInputsManagerTemplate) MarshalToSizedBuffer(dAtA []byte) (int, erro i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x2a } - if m.ContainerTemplate != nil { + if m.SCRAMSHA256 != nil { { - size, err := m.ContainerTemplate.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.SCRAMSHA256.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -7080,44 +7325,23 @@ func (m *SideInputsManagerTemplate) MarshalToSizedBuffer(dAtA []byte) (int, erro i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x22 } - { - size, err := m.AbstractPodTemplate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.Plain != nil { + { + size, err := m.Plain.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Sink) Marshal() (dAtA 
[]byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + i-- + dAtA[i] = 0x1a } - return dAtA[:n], nil -} - -func (m *Sink) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Sink) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Fallback != nil { + if m.GSSAPI != nil { { - size, err := m.Fallback.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.GSSAPI.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -7127,20 +7351,17 @@ func (m *Sink) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - { - size, err := m.AbstractSink.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.Mechanism != nil { + i -= len(*m.Mechanism) + copy(dAtA[i:], *m.Mechanism) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Mechanism))) + i-- + dAtA[i] = 0xa } - i-- - dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *SlidingWindow) Marshal() (dAtA []byte, err error) { +func (m *SASLPlain) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7150,27 +7371,27 @@ func (m *SlidingWindow) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SlidingWindow) MarshalTo(dAtA []byte) (int, error) { +func (m *SASLPlain) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SlidingWindow) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *SASLPlain) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l i-- - if m.Streaming { + if m.Handshake { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x18 - if m.Slide != nil { + if m.PasswordSecret != nil { { - size, err := 
m.Slide.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.PasswordSecret.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -7180,9 +7401,9 @@ func (m *SlidingWindow) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - if m.Length != nil { + if m.UserSecret != nil { { - size, err := m.Length.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.UserSecret.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -7195,7 +7416,7 @@ func (m *SlidingWindow) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *Source) Marshal() (dAtA []byte, err error) { +func (m *Scale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7205,67 +7426,100 @@ func (m *Source) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Source) MarshalTo(dAtA []byte) (int, error) { +func (m *Scale) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Source) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Serving != nil { - { - size, err := m.Serving.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + if m.ScaleDownCooldownSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ScaleDownCooldownSeconds)) i-- - dAtA[i] = 0x42 + dAtA[i] = 0x58 } - if m.JetStream != nil { - { - size, err := m.JetStream.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + if m.ScaleUpCooldownSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ScaleUpCooldownSeconds)) i-- - dAtA[i] = 0x3a + dAtA[i] = 0x50 } - if m.UDSource != nil { - { - size, err := m.UDSource.MarshalToSizedBuffer(dAtA[:i]) - if err != 
nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + if m.ReplicasPerScale != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ReplicasPerScale)) i-- - dAtA[i] = 0x32 + dAtA[i] = 0x48 } - if m.UDTransformer != nil { - { - size, err := m.UDTransformer.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + if m.TargetBufferAvailability != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetBufferAvailability)) i-- - dAtA[i] = 0x2a + dAtA[i] = 0x40 } - if m.Nats != nil { + if m.TargetProcessingSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetProcessingSeconds)) + i-- + dAtA[i] = 0x38 + } + if m.ZeroReplicaSleepSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ZeroReplicaSleepSeconds)) + i-- + dAtA[i] = 0x30 + } + if m.DeprecatedCooldownSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.DeprecatedCooldownSeconds)) + i-- + dAtA[i] = 0x28 + } + if m.LookbackSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LookbackSeconds)) + i-- + dAtA[i] = 0x20 + } + if m.Max != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Max)) + i-- + dAtA[i] = 0x18 + } + if m.Min != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Min)) + i-- + dAtA[i] = 0x10 + } + i-- + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *ServingSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServingSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServingSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Store != nil { { - size, err := 
m.Nats.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Store.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -7275,21 +7529,24 @@ func (m *Source) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x22 } - if m.HTTP != nil { - { - size, err := m.HTTP.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + if m.MsgIDHeaderKey != nil { + i -= len(*m.MsgIDHeaderKey) + copy(dAtA[i:], *m.MsgIDHeaderKey) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MsgIDHeaderKey))) i-- dAtA[i] = 0x1a } - if m.Kafka != nil { + i-- + if m.Service { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + if m.Auth != nil { { - size, err := m.Kafka.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Auth.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -7297,11 +7554,34 @@ func (m *Source) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0xa } - if m.Generator != nil { + return len(dAtA) - i, nil +} + +func (m *ServingStore) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServingStore) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServingStore) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TTL != nil { { - size, err := m.Generator.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.TTL.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -7309,12 +7589,19 @@ func (m *Source) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- + dAtA[i] = 0x12 + } + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], *m.URL) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *Status) Marshal() (dAtA []byte, err error) { +func (m *SessionWindow) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7324,34 +7611,32 @@ func (m *Status) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Status) MarshalTo(dAtA []byte) (int, error) { +func (m *SessionWindow) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *SessionWindow) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.Timeout != nil { + { + size, err := m.Timeout.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *TLS) Marshal() (dAtA []byte, err error) { +func (m *SideInput) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7361,19 +7646,19 @@ func (m *TLS) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *TLS) MarshalTo(dAtA []byte) (int, error) { +func (m *SideInput) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *TLS) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *SideInput) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.KeySecret != nil { + if m.Trigger != nil { { - size, err := 
m.KeySecret.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Trigger.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -7383,21 +7668,23 @@ func (m *TLS) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x22 } - if m.CertSecret != nil { - { - size, err := m.CertSecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a } - i-- - dAtA[i] = 0x1a } - if m.CACertSecret != nil { + if m.Container != nil { { - size, err := m.CACertSecret.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -7407,18 +7694,15 @@ func (m *TLS) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) i-- - if m.InsecureSkipVerify { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *TagConditions) Marshal() (dAtA []byte, err error) { +func (m *SideInputTrigger) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7428,36 +7712,32 @@ func (m *TagConditions) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *TagConditions) MarshalTo(dAtA []byte) (int, error) { +func (m *SideInputTrigger) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *TagConditions) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *SideInputTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if 
len(m.Values) > 0 { - for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Values[iNdEx]) - copy(dAtA[i:], m.Values[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if m.Operator != nil { - i -= len(*m.Operator) - copy(dAtA[i:], *m.Operator) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Operator))) + if m.Timezone != nil { + i -= len(*m.Timezone) + copy(dAtA[i:], *m.Timezone) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Timezone))) i-- - dAtA[i] = 0xa + dAtA[i] = 0x12 } + i -= len(m.Schedule) + copy(dAtA[i:], m.Schedule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *Templates) Marshal() (dAtA []byte, err error) { +func (m *SideInputsManagerTemplate) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7467,31 +7747,19 @@ func (m *Templates) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Templates) MarshalTo(dAtA []byte) (int, error) { +func (m *SideInputsManagerTemplate) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Templates) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *SideInputsManagerTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.VertexTemplate != nil { - { - size, err := m.VertexTemplate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.SideInputsManagerTemplate != nil { + if m.InitContainerTemplate != nil { { - size, err := m.SideInputsManagerTemplate.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.InitContainerTemplate.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -7501,9 +7769,9 @@ func (m *Templates) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { i-- dAtA[i] = 0x1a } - if m.JobTemplate != nil { + if m.ContainerTemplate != nil { { - size, err := m.JobTemplate.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ContainerTemplate.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -7513,22 +7781,20 @@ func (m *Templates) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - if m.DaemonTemplate != nil { - { - size, err := m.DaemonTemplate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + { + size, err := m.AbstractPodTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *Transformer) Marshal() (dAtA []byte, err error) { +func (m *Sink) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7538,58 +7804,42 @@ func (m *Transformer) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Transformer) MarshalTo(dAtA []byte) (int, error) { +func (m *Sink) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Transformer) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Sink) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.KWArgs) > 0 { - keysForKWArgs := make([]string, 0, len(m.KWArgs)) - for k := range m.KWArgs { - keysForKWArgs = append(keysForKWArgs, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForKWArgs) - for iNdEx := len(keysForKWArgs) - 1; iNdEx >= 0; iNdEx-- { - v := m.KWArgs[string(keysForKWArgs[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForKWArgs[iNdEx]) - copy(dAtA[i:], 
keysForKWArgs[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForKWArgs[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a + if m.Fallback != nil { + { + size, err := m.Fallback.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - if len(m.Args) > 0 { - for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Args[iNdEx]) - copy(dAtA[i:], m.Args[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Args[iNdEx]))) - i-- - dAtA[i] = 0x12 + { + size, err := m.AbstractSink.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *UDF) Marshal() (dAtA []byte, err error) { +func (m *SlidingWindow) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7599,31 +7849,27 @@ func (m *UDF) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *UDF) MarshalTo(dAtA []byte) (int, error) { +func (m *SlidingWindow) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *UDF) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *SlidingWindow) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.GroupBy != nil { - { - size, err := m.GroupBy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + i-- + if m.Streaming { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if m.Builtin != nil { + i-- + dAtA[i] = 0x18 + if m.Slide != nil { { - size, err := 
m.Builtin.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Slide.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -7633,9 +7879,9 @@ func (m *UDF) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - if m.Container != nil { + if m.Length != nil { { - size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Length.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -7648,7 +7894,7 @@ func (m *UDF) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *UDSink) Marshal() (dAtA []byte, err error) { +func (m *Source) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7658,52 +7904,43 @@ func (m *UDSink) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *UDSink) MarshalTo(dAtA []byte) (int, error) { +func (m *Source) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *UDSink) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Source) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.Serving != nil { + { + size, err := m.Serving.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *UDSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + if m.JetStream != nil { + { + size, err := m.JetStream.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- 
+ dAtA[i] = 0x3a } - return dAtA[:n], nil -} - -func (m *UDSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UDSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Container != nil { + if m.UDSource != nil { { - size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.UDSource.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -7711,34 +7948,23 @@ func (m *UDSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x32 } - return len(dAtA) - i, nil -} - -func (m *UDTransformer) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + if m.UDTransformer != nil { + { + size, err := m.UDTransformer.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - return dAtA[:n], nil -} - -func (m *UDTransformer) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UDTransformer) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Builtin != nil { + if m.Nats != nil { { - size, err := m.Builtin.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Nats.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -7746,11 +7972,11 @@ func (m *UDTransformer) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x22 } - if m.Container != nil { + if m.HTTP != nil { { - size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.HTTP.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -7758,12 +7984,36 @@ func 
(m *UDTransformer) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x1a } - return len(dAtA) - i, nil -} - -func (m *Vertex) Marshal() (dAtA []byte, err error) { + if m.Kafka != nil { + { + size, err := m.Kafka.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Generator != nil { + { + size, err := m.Generator.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Status) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7773,50 +8023,34 @@ func (m *Vertex) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Vertex) MarshalTo(dAtA []byte) (int, error) { +func (m *Status) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Vertex) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *VertexInstance) Marshal() (dAtA []byte, err error) { +func (m *TLS) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7826,27 +8060,19 @@ func (m *VertexInstance) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VertexInstance) MarshalTo(dAtA []byte) (int, error) { +func (m *TLS) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *VertexInstance) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *TLS) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Replica)) - i-- - dAtA[i] = 0x18 - i -= len(m.Hostname) - copy(dAtA[i:], m.Hostname) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) - i-- - dAtA[i] = 0x12 - if m.Vertex != nil { + if m.KeySecret != nil { { - size, err := m.Vertex.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.KeySecret.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -7854,12 +8080,44 @@ func (m *VertexInstance) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x22 + } + if m.CertSecret != nil { + { + size, err := m.CertSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.CACertSecret != nil { + { + size, err := m.CACertSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i-- + if m.InsecureSkipVerify { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 
0x8 return len(dAtA) - i, nil } -func (m *VertexLimits) Marshal() (dAtA []byte, err error) { +func (m *TagConditions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7869,47 +8127,36 @@ func (m *VertexLimits) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VertexLimits) MarshalTo(dAtA []byte) (int, error) { +func (m *TagConditions) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *VertexLimits) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *TagConditions) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.BufferUsageLimit != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.BufferUsageLimit)) - i-- - dAtA[i] = 0x20 - } - if m.BufferMaxLength != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.BufferMaxLength)) - i-- - dAtA[i] = 0x18 - } - if m.ReadTimeout != nil { - { - size, err := m.ReadTimeout.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- + dAtA[i] = 0x12 } - i-- - dAtA[i] = 0x12 } - if m.ReadBatchSize != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.ReadBatchSize)) + if m.Operator != nil { + i -= len(*m.Operator) + copy(dAtA[i:], *m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Operator))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *VertexList) Marshal() (dAtA []byte, err error) { +func (m *Templates) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7919,44 +8166,68 @@ func (m *VertexList) Marshal() (dAtA 
[]byte, err error) { return dAtA[:n], nil } -func (m *VertexList) MarshalTo(dAtA []byte) (int, error) { +func (m *Templates) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *VertexList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Templates) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.VertexTemplate != nil { + { + size, err := m.VertexTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.SideInputsManagerTemplate != nil { + { + size, err := m.SideInputsManagerTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.JobTemplate != nil { + { + size, err := m.JobTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.DaemonTemplate != nil { + { + size, err := m.DaemonTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i-- - dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *VertexSpec) Marshal() (dAtA []byte, err error) { +func (m *Transformer) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) @@ -7966,83 +8237,58 @@ func (m *VertexSpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VertexSpec) MarshalTo(dAtA []byte) (int, error) { +func (m *Transformer) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *VertexSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Transformer) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size, err := m.Watermark.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.KWArgs) > 0 { + keysForKWArgs := make([]string, 0, len(m.KWArgs)) + for k := range m.KWArgs { + keysForKWArgs = append(keysForKWArgs, string(k)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - if len(m.ToEdges) > 0 { - for iNdEx := len(m.ToEdges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ToEdges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + github_com_gogo_protobuf_sortkeys.Strings(keysForKWArgs) + for iNdEx := len(keysForKWArgs) - 1; iNdEx >= 0; iNdEx-- { + v := m.KWArgs[string(keysForKWArgs[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) i-- - dAtA[i] = 0x32 - } - } - if len(m.FromEdges) > 0 { - for iNdEx := len(m.FromEdges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.FromEdges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + dAtA[i] = 0x12 + i -= len(keysForKWArgs[iNdEx]) + copy(dAtA[i:], keysForKWArgs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForKWArgs[iNdEx]))) i-- - dAtA[i] = 0x2a + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a } } - if m.Replicas != nil { - i = encodeVarintGenerated(dAtA, i, 
uint64(*m.Replicas)) - i-- - dAtA[i] = 0x20 - } - i -= len(m.InterStepBufferServiceName) - copy(dAtA[i:], m.InterStepBufferServiceName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.InterStepBufferServiceName))) - i-- - dAtA[i] = 0x1a - i -= len(m.PipelineName) - copy(dAtA[i:], m.PipelineName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PipelineName))) - i-- - dAtA[i] = 0x12 - { - size, err := m.AbstractVertex.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Args) > 0 { + for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Args[iNdEx]) + copy(dAtA[i:], m.Args[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Args[iNdEx]))) + i-- + dAtA[i] = 0x12 } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *VertexStatus) Marshal() (dAtA []byte, err error) { +func (m *UDF) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -8052,66 +8298,56 @@ func (m *VertexStatus) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VertexStatus) MarshalTo(dAtA []byte) (int, error) { +func (m *UDF) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *VertexStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *UDF) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - i-- - dAtA[i] = 0x40 - { - size, err := m.LastScaledAt.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.GroupBy != nil { + { + size, err := m.GroupBy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= size - i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a } - i-- - dAtA[i] = 0x3a - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x32 - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x2a - i -= len(m.Selector) - copy(dAtA[i:], m.Selector) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Selector))) - i-- - dAtA[i] = 0x22 - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x18 - i -= len(m.Phase) - copy(dAtA[i:], m.Phase) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i-- - dAtA[i] = 0x12 - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.Builtin != nil { + { + size, err := m.Builtin.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Container != nil { + { + size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i-- - dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *VertexTemplate) Marshal() (dAtA []byte, err error) { +func (m *UDSink) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -8121,42 +8357,18 @@ func (m *VertexTemplate) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VertexTemplate) MarshalTo(dAtA []byte) (int, error) { +func (m *UDSink) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *VertexTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *UDSink) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = 
i var l int _ = l - if m.InitContainerTemplate != nil { - { - size, err := m.InitContainerTemplate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.ContainerTemplate != nil { - { - size, err := m.ContainerTemplate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } { - size, err := m.AbstractPodTemplate.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -8168,7 +8380,7 @@ func (m *VertexTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *Watermark) Marshal() (dAtA []byte, err error) { +func (m *UDSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -8178,31 +8390,19 @@ func (m *Watermark) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Watermark) MarshalTo(dAtA []byte) (int, error) { +func (m *UDSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Watermark) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *UDSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.IdleSource != nil { - { - size, err := m.IdleSource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.MaxDelay != nil { + if m.Container != nil { { - size, err := m.MaxDelay.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -8210,20 +8410,12 @@ func (m *Watermark) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, 
uint64(size)) } i-- - dAtA[i] = 0x12 - } - i-- - if m.Disabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = 0xa } - i-- - dAtA[i] = 0x8 return len(dAtA) - i, nil } -func (m *Window) Marshal() (dAtA []byte, err error) { +func (m *UDTransformer) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -8233,31 +8425,19 @@ func (m *Window) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Window) MarshalTo(dAtA []byte) (int, error) { +func (m *UDTransformer) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Window) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *UDTransformer) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Session != nil { - { - size, err := m.Session.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.Sliding != nil { + if m.Builtin != nil { { - size, err := m.Sliding.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Builtin.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -8267,9 +8447,9 @@ func (m *Window) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - if m.Fixed != nil { + if m.Container != nil { { - size, err := m.Fixed.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -8282,1026 +8462,1151 @@ func (m *Window) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *Vertex) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *AbstractPodTemplate) Size() (n int) { - if m == nil { - return 0 - } + +func (m *Vertex) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Vertex) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Metadata != nil { - l = m.Metadata.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.NodeSelector) > 0 { - for k, v := range m.NodeSelector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.Tolerations) > 0 { - for _, e := range m.Tolerations { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.SecurityContext != nil { - l = m.SecurityContext.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.ImagePullSecrets) > 0 { - for _, e := range m.ImagePullSecrets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - l = len(m.PriorityClassName) - n += 1 + l + sovGenerated(uint64(l)) - if m.Priority != nil { - n += 1 + sovGenerated(uint64(*m.Priority)) - } - if m.Affinity != nil { - l = m.Affinity.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.ServiceAccountName) - n += 1 + l + sovGenerated(uint64(l)) - if 
m.RuntimeClassName != nil { - l = len(*m.RuntimeClassName) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.AutomountServiceAccountToken != nil { - n += 2 - } - l = len(m.DNSPolicy) - n += 1 + l + sovGenerated(uint64(l)) - if m.DNSConfig != nil { - l = m.DNSConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *AbstractSink) Size() (n int) { - if m == nil { - return 0 +func (m *VertexInstance) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *VertexInstance) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VertexInstance) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Log != nil { - l = m.Log.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Kafka != nil { - l = m.Kafka.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Blackhole != nil { - l = m.Blackhole.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.UDSink != nil { - l = m.UDSink.Size() - n += 1 + l + sovGenerated(uint64(l)) + i = encodeVarintGenerated(dAtA, i, uint64(m.Replica)) + i-- + dAtA[i] = 0x18 + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x12 + if m.Vertex != nil { + { + size, err := m.Vertex.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *AbstractVertex) Size() (n int) { - if m == nil { - return 0 +func (m *VertexLimits) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + 
return dAtA[:n], nil +} + +func (m *VertexLimits) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VertexLimits) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if m.Source != nil { - l = m.Source.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Sink != nil { - l = m.Sink.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.UDF != nil { - l = m.UDF.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ContainerTemplate != nil { - l = m.ContainerTemplate.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.InitContainerTemplate != nil { - l = m.InitContainerTemplate.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = m.AbstractPodTemplate.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Volumes) > 0 { - for _, e := range m.Volumes { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Limits != nil { - l = m.Limits.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = m.Scale.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.InitContainers) > 0 { - for _, e := range m.InitContainers { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Sidecars) > 0 { - for _, e := range m.Sidecars { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if m.BufferUsageLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.BufferUsageLimit)) + i-- + dAtA[i] = 0x20 } - if m.Partitions != nil { - n += 1 + sovGenerated(uint64(*m.Partitions)) + if m.BufferMaxLength != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.BufferMaxLength)) + i-- + dAtA[i] = 0x18 } - if len(m.SideInputs) > 0 { - for _, s := range m.SideInputs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.ReadTimeout != nil { + { + size, err := m.ReadTimeout.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - if m.SideInputsContainerTemplate != nil { - l = m.SideInputsContainerTemplate.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.ReadBatchSize != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ReadBatchSize)) + i-- + dAtA[i] = 0x8 } - return n + return len(dAtA) - i, nil } -func (m *Authorization) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Token != nil { - l = m.Token.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *VertexList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *BasicAuth) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.User != nil { - l = m.User.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Password != nil { - l = m.Password.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *Blackhole) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n +func (m *VertexList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *BufferServiceConfig) Size() (n int) { - if m == nil { - return 0 - } +func (m *VertexList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Redis != nil { - l = m.Redis.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - if m.JetStream != nil { - l = m.JetStream.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, 
err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *CombinedEdge) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Edge.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FromVertexType) - n += 1 + l + sovGenerated(uint64(l)) - if m.FromVertexPartitionCount != nil { - n += 1 + sovGenerated(uint64(*m.FromVertexPartitionCount)) - } - if m.FromVertexLimits != nil { - l = m.FromVertexLimits.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.ToVertexType) - n += 1 + l + sovGenerated(uint64(l)) - if m.ToVertexPartitionCount != nil { - n += 1 + sovGenerated(uint64(*m.ToVertexPartitionCount)) - } - if m.ToVertexLimits != nil { - l = m.ToVertexLimits.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *VertexSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Container) Size() (n int) { - if m == nil { - return 0 - } +func (m *VertexSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VertexSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Image) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Command) > 0 { - for _, s := range m.Command { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Watermark.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.Args) > 0 { - for _, s := range m.Args { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x3a + if len(m.ToEdges) > 0 { + for iNdEx := len(m.ToEdges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ToEdges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { 
+ return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } } - if len(m.Env) > 0 { - for _, e := range m.Env { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.FromEdges) > 0 { + for iNdEx := len(m.FromEdges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.FromEdges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } } - if len(m.EnvFrom) > 0 { - for _, e := range m.EnvFrom { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x20 } - if len(m.VolumeMounts) > 0 { - for _, e := range m.VolumeMounts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.InterStepBufferServiceName) + copy(dAtA[i:], m.InterStepBufferServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.InterStepBufferServiceName))) + i-- + dAtA[i] = 0x1a + i -= len(m.PipelineName) + copy(dAtA[i:], m.PipelineName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PipelineName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.AbstractVertex.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - l = m.Resources.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.SecurityContext != nil { - l = m.SecurityContext.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ImagePullPolicy != nil { - l = len(*m.ImagePullPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ContainerTemplate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Resources.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ImagePullPolicy) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecurityContext != nil { - l = m.SecurityContext.Size() - n += 1 
+ l + sovGenerated(uint64(l)) - } - if len(m.Env) > 0 { - for _, e := range m.Env { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.EnvFrom) > 0 { - for _, e := range m.EnvFrom { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *VertexStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *DaemonTemplate) Size() (n int) { - if m == nil { - return 0 - } +func (m *VertexStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VertexStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.AbstractPodTemplate.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x40 + { + size, err := m.LastScaledAt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.ContainerTemplate != nil { - l = m.ContainerTemplate.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.InitContainerTemplate != nil { - l = m.InitContainerTemplate.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x3a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + i -= len(m.Selector) + copy(dAtA[i:], m.Selector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Selector))) + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x18 + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Edge) Size() (n int) { - if m == nil { - return 0 +func (m *VertexTemplate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *VertexTemplate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VertexTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.From) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.To) - n += 1 + l + sovGenerated(uint64(l)) - if m.Conditions != nil { - l = m.Conditions.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.InitContainerTemplate != nil { + { + size, err := m.InitContainerTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - if m.OnFull != nil { - l = len(*m.OnFull) - n += 1 + l + sovGenerated(uint64(l)) + if m.ContainerTemplate != nil { + { + size, err := m.ContainerTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - return n + { + size, err := m.AbstractPodTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *FixedWindow) Size() (n int) { - if m == nil { - return 0 +func (m *Watermark) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *Watermark) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Watermark) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Length != nil { - l = m.Length.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.IdleSource != nil { + { + size, err := m.IdleSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - n += 2 - return n + if m.MaxDelay != nil { + { + size, err := m.MaxDelay.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i-- + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *ForwardConditions) Size() (n int) { - if m == nil { - return 0 +func (m *Window) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *Window) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Window) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Tags != nil { - l = m.Tags.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Session != nil { + { + size, err := m.Session.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - return n + if m.Sliding != nil { + { + size, err := m.Sliding.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Fixed != nil { + { + size, err := m.Fixed.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *Function) Size() (n int) { +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AbstractPodTemplate) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Args) > 0 { - for _, s := range m.Args { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + if m.Metadata != nil { + l = m.Metadata.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if len(m.KWArgs) > 0 { - for k, v := range m.KWArgs { + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { _ = k _ = v mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - return n -} - -func (m *GSSAPI) Size() (n int) { - if m == nil { - return 0 + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - var l int - _ = l - l = len(m.ServiceName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Realm) - n += 1 + l + sovGenerated(uint64(l)) - if m.UsernameSecret != nil { - l = m.UsernameSecret.Size() + if m.SecurityContext != nil { + l = m.SecurityContext.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.AuthType != nil { - l = len(*m.AuthType) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.ImagePullSecrets) > 0 { + for _, e := range m.ImagePullSecrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - if m.PasswordSecret != nil { 
- l = m.PasswordSecret.Size() + l = len(m.PriorityClassName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Priority != nil { + n += 1 + sovGenerated(uint64(*m.Priority)) + } + if m.Affinity != nil { + l = m.Affinity.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.KeytabSecret != nil { - l = m.KeytabSecret.Size() + l = len(m.ServiceAccountName) + n += 1 + l + sovGenerated(uint64(l)) + if m.RuntimeClassName != nil { + l = len(*m.RuntimeClassName) n += 1 + l + sovGenerated(uint64(l)) } - if m.KerberosConfigSecret != nil { - l = m.KerberosConfigSecret.Size() + if m.AutomountServiceAccountToken != nil { + n += 2 + } + l = len(m.DNSPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.DNSConfig != nil { + l = m.DNSConfig.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *GeneratorSource) Size() (n int) { +func (m *AbstractSink) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.RPU != nil { - n += 1 + sovGenerated(uint64(*m.RPU)) - } - if m.Duration != nil { - l = m.Duration.Size() + if m.Log != nil { + l = m.Log.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.MsgSize != nil { - n += 1 + sovGenerated(uint64(*m.MsgSize)) - } - if m.KeyCount != nil { - n += 1 + sovGenerated(uint64(*m.KeyCount)) - } - if m.Value != nil { - n += 1 + sovGenerated(uint64(*m.Value)) + if m.Kafka != nil { + l = m.Kafka.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.Jitter != nil { - l = m.Jitter.Size() + if m.Blackhole != nil { + l = m.Blackhole.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.ValueBlob != nil { - l = len(*m.ValueBlob) + if m.UDSink != nil { + l = m.UDSink.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *GetDaemonDeploymentReq) Size() (n int) { +func (m *AbstractVertex) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.ISBSvcType) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Image) + l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.PullPolicy) + if m.Source != nil { + 
l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Sink != nil { + l = m.Sink.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.UDF != nil { + l = m.UDF.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ContainerTemplate != nil { + l = m.ContainerTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.InitContainerTemplate != nil { + l = m.InitContainerTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.AbstractPodTemplate.Size() n += 1 + l + sovGenerated(uint64(l)) - if len(m.Env) > 0 { - for _, e := range m.Env { + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } - l = m.DefaultResources.Size() + if m.Limits != nil { + l = m.Limits.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Scale.Size() n += 1 + l + sovGenerated(uint64(l)) + if len(m.InitContainers) > 0 { + for _, e := range m.InitContainers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Sidecars) > 0 { + for _, e := range m.Sidecars { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Partitions != nil { + n += 1 + sovGenerated(uint64(*m.Partitions)) + } + if len(m.SideInputs) > 0 { + for _, s := range m.SideInputs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.SideInputsContainerTemplate != nil { + l = m.SideInputsContainerTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *GetJetStreamServiceSpecReq) Size() (n int) { +func (m *Authorization) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + if m.Token != nil { + l = m.Token.Size() + n += 1 + l + sovGenerated(uint64(l)) } - n += 1 + sovGenerated(uint64(m.ClusterPort)) - n += 1 + 
sovGenerated(uint64(m.ClientPort)) - n += 1 + sovGenerated(uint64(m.MonitorPort)) - n += 1 + sovGenerated(uint64(m.MetricsPort)) return n } -func (m *GetJetStreamStatefulSetSpecReq) Size() (n int) { +func (m *BasicAuth) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.ServiceName) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + if m.User != nil { + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Password != nil { + l = m.Password.Size() + n += 1 + l + sovGenerated(uint64(l)) } - l = len(m.NatsImage) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.MetricsExporterImage) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ConfigReloaderImage) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.ClusterPort)) - n += 1 + sovGenerated(uint64(m.ClientPort)) - n += 1 + sovGenerated(uint64(m.MonitorPort)) - n += 1 + sovGenerated(uint64(m.MetricsPort)) - l = len(m.ServerAuthSecretName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ServerEncryptionSecretName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ConfigMapName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.PvcNameIfNeeded) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StartCommand) - n += 1 + l + sovGenerated(uint64(l)) - l = m.DefaultResources.Size() - n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *GetRedisServiceSpecReq) Size() (n int) { +func (m *Blackhole) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - n += 1 + 
sovGenerated(uint64(m.RedisContainerPort)) - n += 1 + sovGenerated(uint64(m.SentinelContainerPort)) return n } -func (m *GetRedisStatefulSetSpecReq) Size() (n int) { +func (m *BufferServiceConfig) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.ServiceName) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + if m.Redis != nil { + l = m.Redis.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.JetStream != nil { + l = m.JetStream.Size() + n += 1 + l + sovGenerated(uint64(l)) } - l = len(m.RedisImage) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SentinelImage) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.MetricsExporterImage) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.InitContainerImage) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.RedisContainerPort)) - n += 1 + sovGenerated(uint64(m.SentinelContainerPort)) - n += 1 + sovGenerated(uint64(m.RedisMetricsContainerPort)) - l = len(m.CredentialSecretName) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = len(m.PvcNameIfNeeded) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ConfConfigMapName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ScriptsConfigMapName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.HealthConfigMapName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.DefaultResources.Size() - n += 2 + l + sovGenerated(uint64(l)) return n } -func (m *GetSideInputDeploymentReq) Size() (n int) { +func (m *CombinedEdge) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.ISBSvcType) + l = m.Edge.Size() n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Image) + l = len(m.FromVertexType) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.PullPolicy) - n += 1 + l + sovGenerated(uint64(l)) - if 
len(m.Env) > 0 { - for _, e := range m.Env { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if m.FromVertexPartitionCount != nil { + n += 1 + sovGenerated(uint64(*m.FromVertexPartitionCount)) } - l = m.DefaultResources.Size() + if m.FromVertexLimits != nil { + l = m.FromVertexLimits.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.ToVertexType) n += 1 + l + sovGenerated(uint64(l)) + if m.ToVertexPartitionCount != nil { + n += 1 + sovGenerated(uint64(*m.ToVertexPartitionCount)) + } + if m.ToVertexLimits != nil { + l = m.ToVertexLimits.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *GetVertexPodSpecReq) Size() (n int) { +func (m *Container) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.ISBSvcType) - n += 1 + l + sovGenerated(uint64(l)) l = len(m.Image) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.PullPolicy) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } if len(m.Env) > 0 { for _, e := range m.Env { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } - l = len(m.SideInputsStoreName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ServingSourceStreamName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.PipelineSpec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.DefaultResources.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *GroupBy) Size() (n int) { - if m == nil { - return 0 + if len(m.EnvFrom) > 0 { + for _, e := range m.EnvFrom { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - var l int - _ = l - l = m.Window.Size() + if len(m.VolumeMounts) > 0 { + for _, e := range m.VolumeMounts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Resources.Size() n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if 
m.AllowedLateness != nil { - l = m.AllowedLateness.Size() + if m.SecurityContext != nil { + l = m.SecurityContext.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.Storage != nil { - l = m.Storage.Size() + if m.ImagePullPolicy != nil { + l = len(*m.ImagePullPolicy) n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *HTTPSource) Size() (n int) { +func (m *ContainerTemplate) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Auth != nil { - l = m.Auth.Size() + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ImagePullPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecurityContext != nil { + l = m.SecurityContext.Size() n += 1 + l + sovGenerated(uint64(l)) } - n += 2 + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.EnvFrom) > 0 { + for _, e := range m.EnvFrom { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } -func (m *IdleSource) Size() (n int) { +func (m *DaemonTemplate) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Threshold != nil { - l = m.Threshold.Size() - n += 1 + l + sovGenerated(uint64(l)) + l = m.AbstractPodTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) } - if m.StepInterval != nil { - l = m.StepInterval.Size() + if m.ContainerTemplate != nil { + l = m.ContainerTemplate.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.IncrementBy != nil { - l = m.IncrementBy.Size() + if m.InitContainerTemplate != nil { + l = m.InitContainerTemplate.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *InterStepBufferService) Size() (n int) { +func (m *Edge) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() + l = len(m.From) n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() + l = len(m.To) n += 1 + l + 
sovGenerated(uint64(l)) + if m.Conditions != nil { + l = m.Conditions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.OnFull != nil { + l = len(*m.OnFull) + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *InterStepBufferServiceList) Size() (n int) { +func (m *FixedWindow) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if m.Length != nil { + l = m.Length.Size() + n += 1 + l + sovGenerated(uint64(l)) } + n += 2 return n } -func (m *InterStepBufferServiceSpec) Size() (n int) { +func (m *ForwardConditions) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Redis != nil { - l = m.Redis.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.JetStream != nil { - l = m.JetStream.Size() + if m.Tags != nil { + l = m.Tags.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *InterStepBufferServiceStatus) Size() (n int) { +func (m *Function) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Config.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Type) + l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.KWArgs) > 0 { + for k, v := range m.KWArgs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } return n } -func (m *JetStreamBufferService) Size() (n int) { +func (m *GSSAPI) Size() (n int) { if m == nil { return 0 } var l int _ = 
l - l = len(m.Version) + l = len(m.ServiceName) n += 1 + l + sovGenerated(uint64(l)) - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) + l = len(m.Realm) + n += 1 + l + sovGenerated(uint64(l)) + if m.UsernameSecret != nil { + l = m.UsernameSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.ContainerTemplate != nil { - l = m.ContainerTemplate.Size() + if m.AuthType != nil { + l = len(*m.AuthType) n += 1 + l + sovGenerated(uint64(l)) } - if m.ReloaderContainerTemplate != nil { - l = m.ReloaderContainerTemplate.Size() + if m.PasswordSecret != nil { + l = m.PasswordSecret.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.MetricsContainerTemplate != nil { - l = m.MetricsContainerTemplate.Size() + if m.KeytabSecret != nil { + l = m.KeytabSecret.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.Persistence != nil { - l = m.Persistence.Size() + if m.KerberosConfigSecret != nil { + l = m.KerberosConfigSecret.Size() n += 1 + l + sovGenerated(uint64(l)) } - l = m.AbstractPodTemplate.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Settings != nil { - l = len(*m.Settings) + return n +} + +func (m *GeneratorSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RPU != nil { + n += 1 + sovGenerated(uint64(*m.RPU)) + } + if m.Duration != nil { + l = m.Duration.Size() n += 1 + l + sovGenerated(uint64(l)) } - if len(m.StartArgs) > 0 { - for _, s := range m.StartArgs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + if m.MsgSize != nil { + n += 1 + sovGenerated(uint64(*m.MsgSize)) } - if m.BufferConfig != nil { - l = len(*m.BufferConfig) + if m.KeyCount != nil { + n += 1 + sovGenerated(uint64(*m.KeyCount)) + } + if m.Value != nil { + n += 1 + sovGenerated(uint64(*m.Value)) + } + if m.Jitter != nil { + l = m.Jitter.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ValueBlob != nil { + l = len(*m.ValueBlob) n += 1 + l + sovGenerated(uint64(l)) } - n += 2 - n += 2 return n } -func (m *JetStreamConfig) 
Size() (n int) { +func (m *GetDaemonDeploymentReq) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.URL) + l = len(m.ISBSvcType) n += 1 + l + sovGenerated(uint64(l)) - if m.Auth != nil { - l = m.Auth.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.StreamConfig) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n -} - -func (m *JetStreamSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.URL) + l = len(m.Image) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Stream) + l = len(m.PullPolicy) n += 1 + l + sovGenerated(uint64(l)) - if m.TLS != nil { - l = m.TLS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Auth != nil { - l = m.Auth.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } + l = m.DefaultResources.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *JobTemplate) Size() (n int) { +func (m *GetJetStreamServiceSpecReq) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.AbstractPodTemplate.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.ContainerTemplate != nil { - l = m.ContainerTemplate.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.TTLSecondsAfterFinished != nil { - n += 1 + sovGenerated(uint64(*m.TTLSecondsAfterFinished)) - } - if m.BackoffLimit != nil { - n += 1 + sovGenerated(uint64(*m.BackoffLimit)) + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } + n += 1 + sovGenerated(uint64(m.ClusterPort)) + n += 1 + sovGenerated(uint64(m.ClientPort)) + n += 1 + sovGenerated(uint64(m.MonitorPort)) + n += 1 + sovGenerated(uint64(m.MetricsPort)) return n } -func (m *KafkaSink) Size() (n int) { +func (m *GetJetStreamStatefulSetSpecReq) Size() (n 
int) { if m == nil { return 0 } var l int _ = l - if len(m.Brokers) > 0 { - for _, s := range m.Brokers { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - l = len(m.Topic) + l = len(m.NatsImage) n += 1 + l + sovGenerated(uint64(l)) - if m.TLS != nil { - l = m.TLS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Config) + l = len(m.MetricsExporterImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ConfigReloaderImage) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.ClusterPort)) + n += 1 + sovGenerated(uint64(m.ClientPort)) + n += 1 + sovGenerated(uint64(m.MonitorPort)) + n += 1 + sovGenerated(uint64(m.MetricsPort)) + l = len(m.ServerAuthSecretName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ServerEncryptionSecretName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ConfigMapName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PvcNameIfNeeded) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StartCommand) + n += 1 + l + sovGenerated(uint64(l)) + l = m.DefaultResources.Size() n += 1 + l + sovGenerated(uint64(l)) - if m.SASL != nil { - l = m.SASL.Size() - n += 1 + l + sovGenerated(uint64(l)) - } return n } -func (m *KafkaSource) Size() (n int) { +func (m *GetMonoVertexDaemonDeploymentReq) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Brokers) > 0 { - for _, s := range m.Brokers { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.Topic) + l = len(m.Image) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ConsumerGroupName) + l = len(m.PullPolicy) n += 1 + l + sovGenerated(uint64(l)) - if m.TLS != nil { - l = m.TLS.Size() - n += 1 + l + sovGenerated(uint64(l)) + 
if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - l = len(m.Config) + l = m.DefaultResources.Size() n += 1 + l + sovGenerated(uint64(l)) - if m.SASL != nil { - l = m.SASL.Size() - n += 1 + l + sovGenerated(uint64(l)) - } return n } -func (m *Lifecycle) Size() (n int) { +func (m *GetMonoVertexPodSpecReq) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.DeleteGracePeriodSeconds != nil { - n += 1 + sovGenerated(uint64(*m.DeleteGracePeriodSeconds)) - } - l = len(m.DesiredPhase) + l = len(m.Image) n += 1 + l + sovGenerated(uint64(l)) - if m.PauseGracePeriodSeconds != nil { - n += 1 + sovGenerated(uint64(*m.PauseGracePeriodSeconds)) - } - return n -} - -func (m *Log) Size() (n int) { - if m == nil { - return 0 + l = len(m.PullPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - var l int - _ = l + l = m.DefaultResources.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *Metadata) Size() (n int) { +func (m *GetRedisServiceSpecReq) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Annotations) > 0 { - for k, v := range m.Annotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } if len(m.Labels) > 0 { for k, v := range m.Labels { _ = k @@ -9310,145 +9615,162 @@ func (m *Metadata) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } + n += 1 + sovGenerated(uint64(m.RedisContainerPort)) + n += 1 + sovGenerated(uint64(m.SentinelContainerPort)) return n } -func (m *NativeRedis) Size() (n int) { +func (m *GetRedisStatefulSetSpecReq) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Version) + l = len(m.ServiceName) n += 1 + l + sovGenerated(uint64(l)) - if m.Replicas != nil { - 
n += 1 + sovGenerated(uint64(*m.Replicas)) - } - if m.RedisContainerTemplate != nil { - l = m.RedisContainerTemplate.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SentinelContainerTemplate != nil { - l = m.SentinelContainerTemplate.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.MetricsContainerTemplate != nil { - l = m.MetricsContainerTemplate.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.InitContainerTemplate != nil { - l = m.InitContainerTemplate.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Persistence != nil { - l = m.Persistence.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - l = m.AbstractPodTemplate.Size() + l = len(m.RedisImage) n += 1 + l + sovGenerated(uint64(l)) - if m.Settings != nil { - l = m.Settings.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + l = len(m.SentinelImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MetricsExporterImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.InitContainerImage) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.RedisContainerPort)) + n += 1 + sovGenerated(uint64(m.SentinelContainerPort)) + n += 1 + sovGenerated(uint64(m.RedisMetricsContainerPort)) + l = len(m.CredentialSecretName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.PvcNameIfNeeded) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ConfConfigMapName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ScriptsConfigMapName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.HealthConfigMapName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.DefaultResources.Size() + n += 2 + l + sovGenerated(uint64(l)) return n } -func (m *NatsAuth) Size() (n int) { +func (m *GetSideInputDeploymentReq) Size() (n int) { if m == nil { 
return 0 } var l int _ = l - if m.Basic != nil { - l = m.Basic.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Token != nil { - l = m.Token.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NKey != nil { - l = m.NKey.Size() - n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ISBSvcType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PullPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } + l = m.DefaultResources.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *NatsSource) Size() (n int) { +func (m *GetVertexPodSpecReq) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.URL) + l = len(m.ISBSvcType) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Subject) + l = len(m.Image) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Queue) + l = len(m.PullPolicy) n += 1 + l + sovGenerated(uint64(l)) - if m.TLS != nil { - l = m.TLS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Auth != nil { - l = m.Auth.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } + l = len(m.SideInputsStoreName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ServingSourceStreamName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.PipelineSpec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.DefaultResources.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *NoStore) Size() (n int) { +func (m *GroupBy) Size() (n int) { if m == nil { return 0 } var l int _ = l + l = m.Window.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.AllowedLateness != nil { + l = m.AllowedLateness.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Storage != nil { + l = m.Storage.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *PBQStorage) 
Size() (n int) { +func (m *HTTPSource) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.PersistentVolumeClaim != nil { - l = m.PersistentVolumeClaim.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.EmptyDir != nil { - l = m.EmptyDir.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NoStore != nil { - l = m.NoStore.Size() + if m.Auth != nil { + l = m.Auth.Size() n += 1 + l + sovGenerated(uint64(l)) } + n += 2 return n } -func (m *PersistenceStrategy) Size() (n int) { +func (m *IdleSource) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.StorageClassName != nil { - l = len(*m.StorageClassName) + if m.Threshold != nil { + l = m.Threshold.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.AccessMode != nil { - l = len(*m.AccessMode) + if m.StepInterval != nil { + l = m.StepInterval.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.VolumeSize != nil { - l = m.VolumeSize.Size() + if m.IncrementBy != nil { + l = m.IncrementBy.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *Pipeline) Size() (n int) { +func (m *InterStepBufferService) Size() (n int) { if m == nil { return 0 } @@ -9463,29 +9785,7 @@ func (m *Pipeline) Size() (n int) { return n } -func (m *PipelineLimits) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ReadBatchSize != nil { - n += 1 + sovGenerated(uint64(*m.ReadBatchSize)) - } - if m.BufferMaxLength != nil { - n += 1 + sovGenerated(uint64(*m.BufferMaxLength)) - } - if m.BufferUsageLimit != nil { - n += 1 + sovGenerated(uint64(*m.BufferUsageLimit)) - } - if m.ReadTimeout != nil { - l = m.ReadTimeout.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *PipelineList) Size() (n int) { +func (m *InterStepBufferServiceList) Size() (n int) { if m == nil { return 0 } @@ -9502,48 +9802,24 @@ func (m *PipelineList) Size() (n int) { return n } -func (m *PipelineSpec) Size() (n int) { +func (m *InterStepBufferServiceSpec) Size() (n int) { if m == nil 
{ return 0 } var l int _ = l - l = len(m.InterStepBufferServiceName) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Vertices) > 0 { - for _, e := range m.Vertices { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Edges) > 0 { - for _, e := range m.Edges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = m.Lifecycle.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Limits != nil { - l = m.Limits.Size() + if m.Redis != nil { + l = m.Redis.Size() n += 1 + l + sovGenerated(uint64(l)) } - l = m.Watermark.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Templates != nil { - l = m.Templates.Size() + if m.JetStream != nil { + l = m.JetStream.Size() n += 1 + l + sovGenerated(uint64(l)) } - if len(m.SideInputs) > 0 { - for _, e := range m.SideInputs { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } return n } -func (m *PipelineStatus) Size() (n int) { +func (m *InterStepBufferServiceStatus) Size() (n int) { if m == nil { return 0 } @@ -9555,48 +9831,63 @@ func (m *PipelineStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Message) n += 1 + l + sovGenerated(uint64(l)) - l = m.LastUpdated.Size() + l = m.Config.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) n += 1 + l + sovGenerated(uint64(l)) - if m.VertexCount != nil { - n += 1 + sovGenerated(uint64(*m.VertexCount)) - } - if m.SourceCount != nil { - n += 1 + sovGenerated(uint64(*m.SourceCount)) - } - if m.SinkCount != nil { - n += 1 + sovGenerated(uint64(*m.SinkCount)) - } - if m.UDFCount != nil { - n += 1 + sovGenerated(uint64(*m.UDFCount)) - } - if m.MapUDFCount != nil { - n += 1 + sovGenerated(uint64(*m.MapUDFCount)) - } - if m.ReduceUDFCount != nil { - n += 1 + sovGenerated(uint64(*m.ReduceUDFCount)) - } n += 1 + sovGenerated(uint64(m.ObservedGeneration)) return n } -func (m *RedisBufferService) Size() (n int) { +func (m *JetStreamBufferService) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Native 
!= nil { - l = m.Native.Size() + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.ContainerTemplate != nil { + l = m.ContainerTemplate.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.External != nil { - l = m.External.Size() + if m.ReloaderContainerTemplate != nil { + l = m.ReloaderContainerTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MetricsContainerTemplate != nil { + l = m.MetricsContainerTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Persistence != nil { + l = m.Persistence.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.AbstractPodTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Settings != nil { + l = len(*m.Settings) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.StartArgs) > 0 { + for _, s := range m.StartArgs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.BufferConfig != nil { + l = len(*m.BufferConfig) n += 1 + l + sovGenerated(uint64(l)) } + n += 2 + n += 2 return n } -func (m *RedisConfig) Size() (n int) { +func (m *JetStreamConfig) Size() (n int) { if m == nil { return 0 } @@ -9604,481 +9895,428 @@ func (m *RedisConfig) Size() (n int) { _ = l l = len(m.URL) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SentinelURL) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.MasterName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.User) - n += 1 + l + sovGenerated(uint64(l)) - if m.Password != nil { - l = m.Password.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SentinelPassword != nil { - l = m.SentinelPassword.Size() + if m.Auth != nil { + l = m.Auth.Size() n += 1 + l + sovGenerated(uint64(l)) } + l = len(m.StreamConfig) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 return n } -func (m *RedisSettings) Size() (n int) { +func (m *JetStreamSource) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Redis) - n += 1 + l + 
sovGenerated(uint64(l)) - l = len(m.Master) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Replica) + l = len(m.URL) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Sentinel) + l = len(m.Stream) n += 1 + l + sovGenerated(uint64(l)) + if m.TLS != nil { + l = m.TLS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Auth != nil { + l = m.Auth.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *SASL) Size() (n int) { +func (m *JobTemplate) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Mechanism != nil { - l = len(*m.Mechanism) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.GSSAPI != nil { - l = m.GSSAPI.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Plain != nil { - l = m.Plain.Size() + l = m.AbstractPodTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.ContainerTemplate != nil { + l = m.ContainerTemplate.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.SCRAMSHA256 != nil { - l = m.SCRAMSHA256.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.TTLSecondsAfterFinished != nil { + n += 1 + sovGenerated(uint64(*m.TTLSecondsAfterFinished)) } - if m.SCRAMSHA512 != nil { - l = m.SCRAMSHA512.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.BackoffLimit != nil { + n += 1 + sovGenerated(uint64(*m.BackoffLimit)) } return n } -func (m *SASLPlain) Size() (n int) { +func (m *KafkaSink) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.UserSecret != nil { - l = m.UserSecret.Size() + if len(m.Brokers) > 0 { + for _, s := range m.Brokers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Topic) + n += 1 + l + sovGenerated(uint64(l)) + if m.TLS != nil { + l = m.TLS.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.PasswordSecret != nil { - l = m.PasswordSecret.Size() + l = len(m.Config) + n += 1 + l + sovGenerated(uint64(l)) + if m.SASL != nil { + l = m.SASL.Size() n += 1 + l + sovGenerated(uint64(l)) } - n += 2 return n } -func (m *Scale) Size() (n int) { +func (m 
*KafkaSource) Size() (n int) { if m == nil { return 0 } var l int _ = l - n += 2 - if m.Min != nil { - n += 1 + sovGenerated(uint64(*m.Min)) - } - if m.Max != nil { - n += 1 + sovGenerated(uint64(*m.Max)) - } - if m.LookbackSeconds != nil { - n += 1 + sovGenerated(uint64(*m.LookbackSeconds)) - } - if m.DeprecatedCooldownSeconds != nil { - n += 1 + sovGenerated(uint64(*m.DeprecatedCooldownSeconds)) - } - if m.ZeroReplicaSleepSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ZeroReplicaSleepSeconds)) - } - if m.TargetProcessingSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TargetProcessingSeconds)) - } - if m.TargetBufferAvailability != nil { - n += 1 + sovGenerated(uint64(*m.TargetBufferAvailability)) - } - if m.ReplicasPerScale != nil { - n += 1 + sovGenerated(uint64(*m.ReplicasPerScale)) + if len(m.Brokers) > 0 { + for _, s := range m.Brokers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - if m.ScaleUpCooldownSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ScaleUpCooldownSeconds)) + l = len(m.Topic) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ConsumerGroupName) + n += 1 + l + sovGenerated(uint64(l)) + if m.TLS != nil { + l = m.TLS.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.ScaleDownCooldownSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ScaleDownCooldownSeconds)) + l = len(m.Config) + n += 1 + l + sovGenerated(uint64(l)) + if m.SASL != nil { + l = m.SASL.Size() + n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *ServingSource) Size() (n int) { +func (m *Lifecycle) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Auth != nil { - l = m.Auth.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - if m.MsgIDHeaderKey != nil { - l = len(*m.MsgIDHeaderKey) - n += 1 + l + sovGenerated(uint64(l)) + if m.DeleteGracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.DeleteGracePeriodSeconds)) } - if m.Store != nil { - l = m.Store.Size() - n += 1 + l + sovGenerated(uint64(l)) + l = 
len(m.DesiredPhase) + n += 1 + l + sovGenerated(uint64(l)) + if m.PauseGracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.PauseGracePeriodSeconds)) } return n } -func (m *ServingStore) Size() (n int) { +func (m *Log) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.URL != nil { - l = len(*m.URL) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.TTL != nil { - l = m.TTL.Size() - n += 1 + l + sovGenerated(uint64(l)) - } return n } -func (m *SessionWindow) Size() (n int) { +func (m *Metadata) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Timeout != nil { - l = m.Timeout.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } return n } -func (m *SideInput) Size() (n int) { +func (m *MonoVertex) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Name) + l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - if m.Container != nil { - l = m.Container.Size() - n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *MonoVertexLimits) Size() (n int) { + if m == nil { + return 0 } - if len(m.Volumes) > 0 { - for _, e := range m.Volumes { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + var l int + _ = l + if m.ReadBatchSize != nil { + n += 1 + sovGenerated(uint64(*m.ReadBatchSize)) } - if m.Trigger != nil { - l = m.Trigger.Size() + if m.ReadTimeout != nil { + l = m.ReadTimeout.Size() n += 
1 + l + sovGenerated(uint64(l)) } return n } -func (m *SideInputTrigger) Size() (n int) { +func (m *MonoVertexList) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Schedule) + l = m.ListMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - if m.Timezone != nil { - l = len(*m.Timezone) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } return n } -func (m *SideInputsManagerTemplate) Size() (n int) { +func (m *MonoVertexSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Source != nil { + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Sink != nil { + l = m.Sink.Size() + n += 1 + l + sovGenerated(uint64(l)) + } l = m.AbstractPodTemplate.Size() n += 1 + l + sovGenerated(uint64(l)) if m.ContainerTemplate != nil { l = m.ContainerTemplate.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.InitContainerTemplate != nil { - l = m.InitContainerTemplate.Size() + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Limits != nil { + l = m.Limits.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Scale.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.InitContainers) > 0 { + for _, e := range m.InitContainers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Sidecars) > 0 { + for _, e := range m.Sidecars { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DaemonTemplate != nil { + l = m.DaemonTemplate.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *Sink) Size() (n int) { +func (m *MonoVertexStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.AbstractSink.Size() + l = m.Status.Size() n += 1 + l + sovGenerated(uint64(l)) - if m.Fallback != nil { - l = m.Fallback.Size() - n += 1 
+ l + sovGenerated(uint64(l)) - } + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Replicas)) + l = len(m.Selector) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdated.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastScaledAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) return n } -func (m *SlidingWindow) Size() (n int) { +func (m *NativeRedis) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Length != nil { - l = m.Length.Size() + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.RedisContainerTemplate != nil { + l = m.RedisContainerTemplate.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.Slide != nil { - l = m.Slide.Size() + if m.SentinelContainerTemplate != nil { + l = m.SentinelContainerTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MetricsContainerTemplate != nil { + l = m.MetricsContainerTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.InitContainerTemplate != nil { + l = m.InitContainerTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Persistence != nil { + l = m.Persistence.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.AbstractPodTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Settings != nil { + l = m.Settings.Size() n += 1 + l + sovGenerated(uint64(l)) } - n += 2 return n } -func (m *Source) Size() (n int) { +func (m *NatsAuth) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Generator != nil { - l = m.Generator.Size() + if m.Basic != nil { + l = m.Basic.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.Kafka != nil { - l = m.Kafka.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.HTTP != nil { - l = m.HTTP.Size() - n 
+= 1 + l + sovGenerated(uint64(l)) - } - if m.Nats != nil { - l = m.Nats.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.UDTransformer != nil { - l = m.UDTransformer.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.UDSource != nil { - l = m.UDSource.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.JetStream != nil { - l = m.JetStream.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Serving != nil { - l = m.Serving.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *Status) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *TLS) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 2 - if m.CACertSecret != nil { - l = m.CACertSecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.CertSecret != nil { - l = m.CertSecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.KeySecret != nil { - l = m.KeySecret.Size() + if m.Token != nil { + l = m.Token.Size() n += 1 + l + sovGenerated(uint64(l)) } - return n -} - -func (m *TagConditions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Operator != nil { - l = len(*m.Operator) + if m.NKey != nil { + l = m.NKey.Size() n += 1 + l + sovGenerated(uint64(l)) } - if len(m.Values) > 0 { - for _, s := range m.Values { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } return n } -func (m *Templates) Size() (n int) { +func (m *NatsSource) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.DaemonTemplate != nil { - l = m.DaemonTemplate.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.JobTemplate != nil { - l = m.JobTemplate.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SideInputsManagerTemplate != nil { - l = m.SideInputsManagerTemplate.Size() + l = len(m.URL) + n += 1 + l + sovGenerated(uint64(l)) + l = 
len(m.Subject) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Queue) + n += 1 + l + sovGenerated(uint64(l)) + if m.TLS != nil { + l = m.TLS.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.VertexTemplate != nil { - l = m.VertexTemplate.Size() + if m.Auth != nil { + l = m.Auth.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *Transformer) Size() (n int) { +func (m *NoStore) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Args) > 0 { - for _, s := range m.Args { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.KWArgs) > 0 { - for k, v := range m.KWArgs { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } return n } -func (m *UDF) Size() (n int) { +func (m *PBQStorage) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Container != nil { - l = m.Container.Size() + if m.PersistentVolumeClaim != nil { + l = m.PersistentVolumeClaim.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.Builtin != nil { - l = m.Builtin.Size() + if m.EmptyDir != nil { + l = m.EmptyDir.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.GroupBy != nil { - l = m.GroupBy.Size() + if m.NoStore != nil { + l = m.NoStore.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *UDSink) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Container.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *UDSource) Size() (n int) { +func (m *PersistenceStrategy) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Container != nil { - l = m.Container.Size() + if m.StorageClassName != nil { + l = len(*m.StorageClassName) n += 1 + l + sovGenerated(uint64(l)) } - return n -} - -func (m *UDTransformer) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if 
m.Container != nil { - l = m.Container.Size() + if m.AccessMode != nil { + l = len(*m.AccessMode) n += 1 + l + sovGenerated(uint64(l)) } - if m.Builtin != nil { - l = m.Builtin.Size() + if m.VolumeSize != nil { + l = m.VolumeSize.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *Vertex) Size() (n int) { +func (m *Pipeline) Size() (n int) { if m == nil { return 0 } @@ -10093,23 +10331,7 @@ func (m *Vertex) Size() (n int) { return n } -func (m *VertexInstance) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Vertex != nil { - l = m.Vertex.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Hostname) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Replica)) - return n -} - -func (m *VertexLimits) Size() (n int) { +func (m *PipelineLimits) Size() (n int) { if m == nil { return 0 } @@ -10118,20 +10340,20 @@ func (m *VertexLimits) Size() (n int) { if m.ReadBatchSize != nil { n += 1 + sovGenerated(uint64(*m.ReadBatchSize)) } - if m.ReadTimeout != nil { - l = m.ReadTimeout.Size() - n += 1 + l + sovGenerated(uint64(l)) - } if m.BufferMaxLength != nil { n += 1 + sovGenerated(uint64(*m.BufferMaxLength)) } if m.BufferUsageLimit != nil { n += 1 + sovGenerated(uint64(*m.BufferUsageLimit)) } + if m.ReadTimeout != nil { + l = m.ReadTimeout.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *VertexList) Size() (n int) { +func (m *PipelineList) Size() (n int) { if m == nil { return 0 } @@ -10148,39 +10370,48 @@ func (m *VertexList) Size() (n int) { return n } -func (m *VertexSpec) Size() (n int) { +func (m *PipelineSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.AbstractVertex.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.PipelineName) - n += 1 + l + sovGenerated(uint64(l)) l = len(m.InterStepBufferServiceName) n += 1 + l + sovGenerated(uint64(l)) - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) - } - if len(m.FromEdges) > 0 { - for 
_, e := range m.FromEdges { + if len(m.Vertices) > 0 { + for _, e := range m.Vertices { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } - if len(m.ToEdges) > 0 { - for _, e := range m.ToEdges { + if len(m.Edges) > 0 { + for _, e := range m.Edges { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } + l = m.Lifecycle.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Limits != nil { + l = m.Limits.Size() + n += 1 + l + sovGenerated(uint64(l)) + } l = m.Watermark.Size() n += 1 + l + sovGenerated(uint64(l)) + if m.Templates != nil { + l = m.Templates.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.SideInputs) > 0 { + for _, e := range m.SideInputs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } -func (m *VertexStatus) Size() (n int) { +func (m *PipelineStatus) Size() (n int) { if m == nil { return 0 } @@ -10190,1221 +10421,976 @@ func (m *VertexStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Phase) n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Replicas)) - l = len(m.Selector) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) l = len(m.Message) n += 1 + l + sovGenerated(uint64(l)) - l = m.LastScaledAt.Size() + l = m.LastUpdated.Size() n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - return n -} - -func (m *VertexTemplate) Size() (n int) { - if m == nil { - return 0 + if m.VertexCount != nil { + n += 1 + sovGenerated(uint64(*m.VertexCount)) } - var l int - _ = l - l = m.AbstractPodTemplate.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.ContainerTemplate != nil { - l = m.ContainerTemplate.Size() + if m.SourceCount != nil { + n += 1 + sovGenerated(uint64(*m.SourceCount)) + } + if m.SinkCount != nil { + n += 1 + sovGenerated(uint64(*m.SinkCount)) + } + if m.UDFCount != nil { + n += 1 + sovGenerated(uint64(*m.UDFCount)) + } + if m.MapUDFCount != nil { + n += 1 + 
sovGenerated(uint64(*m.MapUDFCount)) + } + if m.ReduceUDFCount != nil { + n += 1 + sovGenerated(uint64(*m.ReduceUDFCount)) + } + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + return n +} + +func (m *RedisBufferService) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Native != nil { + l = m.Native.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.InitContainerTemplate != nil { - l = m.InitContainerTemplate.Size() + if m.External != nil { + l = m.External.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *Watermark) Size() (n int) { +func (m *RedisConfig) Size() (n int) { if m == nil { return 0 } var l int _ = l - n += 2 - if m.MaxDelay != nil { - l = m.MaxDelay.Size() + l = len(m.URL) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SentinelURL) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MasterName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + if m.Password != nil { + l = m.Password.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.IdleSource != nil { - l = m.IdleSource.Size() + if m.SentinelPassword != nil { + l = m.SentinelPassword.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *Window) Size() (n int) { +func (m *RedisSettings) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Fixed != nil { - l = m.Fixed.Size() + l = len(m.Redis) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Master) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Replica) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Sentinel) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SASL) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Mechanism != nil { + l = len(*m.Mechanism) n += 1 + l + sovGenerated(uint64(l)) } - if m.Sliding != nil { - l = m.Sliding.Size() + if m.GSSAPI != nil { + l = m.GSSAPI.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.Session != nil { - l = m.Session.Size() + if 
m.Plain != nil { + l = m.Plain.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SCRAMSHA256 != nil { + l = m.SCRAMSHA256.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SCRAMSHA512 != nil { + l = m.SCRAMSHA512.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +func (m *SASLPlain) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UserSecret != nil { + l = m.UserSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PasswordSecret != nil { + l = m.PasswordSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n } -func (this *AbstractPodTemplate) String() string { - if this == nil { - return "nil" + +func (m *Scale) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForTolerations := "[]Toleration{" - for _, f := range this.Tolerations { - repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," + var l int + _ = l + n += 2 + if m.Min != nil { + n += 1 + sovGenerated(uint64(*m.Min)) } - repeatedStringForTolerations += "}" - repeatedStringForImagePullSecrets := "[]LocalObjectReference{" - for _, f := range this.ImagePullSecrets { - repeatedStringForImagePullSecrets += fmt.Sprintf("%v", f) + "," + if m.Max != nil { + n += 1 + sovGenerated(uint64(*m.Max)) } - repeatedStringForImagePullSecrets += "}" - keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) - for k := range this.NodeSelector { - keysForNodeSelector = append(keysForNodeSelector, k) + if m.LookbackSeconds != nil { + n += 1 + sovGenerated(uint64(*m.LookbackSeconds)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) - mapStringForNodeSelector := "map[string]string{" - for _, k := range keysForNodeSelector { - mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + if 
m.DeprecatedCooldownSeconds != nil { + n += 1 + sovGenerated(uint64(*m.DeprecatedCooldownSeconds)) } - mapStringForNodeSelector += "}" - s := strings.Join([]string{`&AbstractPodTemplate{`, - `Metadata:` + strings.Replace(this.Metadata.String(), "Metadata", "Metadata", 1) + `,`, - `NodeSelector:` + mapStringForNodeSelector + `,`, - `Tolerations:` + repeatedStringForTolerations + `,`, - `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "PodSecurityContext", "v1.PodSecurityContext", 1) + `,`, - `ImagePullSecrets:` + repeatedStringForImagePullSecrets + `,`, - `PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`, - `Priority:` + valueToStringGenerated(this.Priority) + `,`, - `Affinity:` + strings.Replace(fmt.Sprintf("%v", this.Affinity), "Affinity", "v1.Affinity", 1) + `,`, - `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, - `RuntimeClassName:` + valueToStringGenerated(this.RuntimeClassName) + `,`, - `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, - `DNSPolicy:` + fmt.Sprintf("%v", this.DNSPolicy) + `,`, - `DNSConfig:` + strings.Replace(fmt.Sprintf("%v", this.DNSConfig), "PodDNSConfig", "v1.PodDNSConfig", 1) + `,`, - `}`, - }, "") - return s -} -func (this *AbstractSink) String() string { - if this == nil { - return "nil" + if m.ZeroReplicaSleepSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ZeroReplicaSleepSeconds)) } - s := strings.Join([]string{`&AbstractSink{`, - `Log:` + strings.Replace(this.Log.String(), "Log", "Log", 1) + `,`, - `Kafka:` + strings.Replace(this.Kafka.String(), "KafkaSink", "KafkaSink", 1) + `,`, - `Blackhole:` + strings.Replace(this.Blackhole.String(), "Blackhole", "Blackhole", 1) + `,`, - `UDSink:` + strings.Replace(this.UDSink.String(), "UDSink", "UDSink", 1) + `,`, - `}`, - }, "") - return s -} -func (this *AbstractVertex) String() string { - if this == nil { - return "nil" + if m.TargetProcessingSeconds != nil { + n += 
1 + sovGenerated(uint64(*m.TargetProcessingSeconds)) } - repeatedStringForVolumes := "[]Volume{" - for _, f := range this.Volumes { - repeatedStringForVolumes += fmt.Sprintf("%v", f) + "," + if m.TargetBufferAvailability != nil { + n += 1 + sovGenerated(uint64(*m.TargetBufferAvailability)) } - repeatedStringForVolumes += "}" - repeatedStringForInitContainers := "[]Container{" - for _, f := range this.InitContainers { - repeatedStringForInitContainers += fmt.Sprintf("%v", f) + "," + if m.ReplicasPerScale != nil { + n += 1 + sovGenerated(uint64(*m.ReplicasPerScale)) } - repeatedStringForInitContainers += "}" - repeatedStringForSidecars := "[]Container{" - for _, f := range this.Sidecars { - repeatedStringForSidecars += fmt.Sprintf("%v", f) + "," + if m.ScaleUpCooldownSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ScaleUpCooldownSeconds)) } - repeatedStringForSidecars += "}" - s := strings.Join([]string{`&AbstractVertex{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Source:` + strings.Replace(this.Source.String(), "Source", "Source", 1) + `,`, - `Sink:` + strings.Replace(this.Sink.String(), "Sink", "Sink", 1) + `,`, - `UDF:` + strings.Replace(this.UDF.String(), "UDF", "UDF", 1) + `,`, - `ContainerTemplate:` + strings.Replace(this.ContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, - `InitContainerTemplate:` + strings.Replace(this.InitContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, - `AbstractPodTemplate:` + strings.Replace(strings.Replace(this.AbstractPodTemplate.String(), "AbstractPodTemplate", "AbstractPodTemplate", 1), `&`, ``, 1) + `,`, - `Volumes:` + repeatedStringForVolumes + `,`, - `Limits:` + strings.Replace(this.Limits.String(), "VertexLimits", "VertexLimits", 1) + `,`, - `Scale:` + strings.Replace(strings.Replace(this.Scale.String(), "Scale", "Scale", 1), `&`, ``, 1) + `,`, - `InitContainers:` + repeatedStringForInitContainers + `,`, - `Sidecars:` + repeatedStringForSidecars + `,`, - 
`Partitions:` + valueToStringGenerated(this.Partitions) + `,`, - `SideInputs:` + fmt.Sprintf("%v", this.SideInputs) + `,`, - `SideInputsContainerTemplate:` + strings.Replace(this.SideInputsContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Authorization) String() string { - if this == nil { - return "nil" + if m.ScaleDownCooldownSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ScaleDownCooldownSeconds)) } - s := strings.Join([]string{`&Authorization{`, - `Token:` + strings.Replace(fmt.Sprintf("%v", this.Token), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *BasicAuth) String() string { - if this == nil { - return "nil" + +func (m *ServingSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&BasicAuth{`, - `User:` + strings.Replace(fmt.Sprintf("%v", this.User), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `Password:` + strings.Replace(fmt.Sprintf("%v", this.Password), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Blackhole) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.Auth != nil { + l = m.Auth.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&Blackhole{`, - `}`, - }, "") - return s -} -func (this *BufferServiceConfig) String() string { - if this == nil { - return "nil" + n += 2 + if m.MsgIDHeaderKey != nil { + l = len(*m.MsgIDHeaderKey) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&BufferServiceConfig{`, - `Redis:` + strings.Replace(this.Redis.String(), "RedisConfig", "RedisConfig", 1) + `,`, - `JetStream:` + strings.Replace(this.JetStream.String(), "JetStreamConfig", "JetStreamConfig", 1) + `,`, - `}`, - }, "") - return s -} -func (this *CombinedEdge) String() string { - if this == nil { - return "nil" + if m.Store != nil { + l = m.Store.Size() + n += 
1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&CombinedEdge{`, - `Edge:` + strings.Replace(strings.Replace(this.Edge.String(), "Edge", "Edge", 1), `&`, ``, 1) + `,`, - `FromVertexType:` + fmt.Sprintf("%v", this.FromVertexType) + `,`, - `FromVertexPartitionCount:` + valueToStringGenerated(this.FromVertexPartitionCount) + `,`, - `FromVertexLimits:` + strings.Replace(this.FromVertexLimits.String(), "VertexLimits", "VertexLimits", 1) + `,`, - `ToVertexType:` + fmt.Sprintf("%v", this.ToVertexType) + `,`, - `ToVertexPartitionCount:` + valueToStringGenerated(this.ToVertexPartitionCount) + `,`, - `ToVertexLimits:` + strings.Replace(this.ToVertexLimits.String(), "VertexLimits", "VertexLimits", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *Container) String() string { - if this == nil { - return "nil" + +func (m *ServingStore) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForEnv := "[]EnvVar{" - for _, f := range this.Env { - repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + var l int + _ = l + if m.URL != nil { + l = len(*m.URL) + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForEnv += "}" - repeatedStringForEnvFrom := "[]EnvFromSource{" - for _, f := range this.EnvFrom { - repeatedStringForEnvFrom += fmt.Sprintf("%v", f) + "," + if m.TTL != nil { + l = m.TTL.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForEnvFrom += "}" - repeatedStringForVolumeMounts := "[]VolumeMount{" - for _, f := range this.VolumeMounts { - repeatedStringForVolumeMounts += fmt.Sprintf("%v", f) + "," + return n +} + +func (m *SessionWindow) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForVolumeMounts += "}" - s := strings.Join([]string{`&Container{`, - `Image:` + fmt.Sprintf("%v", this.Image) + `,`, - `Command:` + fmt.Sprintf("%v", this.Command) + `,`, - `Args:` + fmt.Sprintf("%v", this.Args) + `,`, - `Env:` + repeatedStringForEnv + `,`, - `EnvFrom:` + repeatedStringForEnvFrom + `,`, - `VolumeMounts:` + 
repeatedStringForVolumeMounts + `,`, - `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, - `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "SecurityContext", "v1.SecurityContext", 1) + `,`, - `ImagePullPolicy:` + valueToStringGenerated(this.ImagePullPolicy) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if m.Timeout != nil { + l = m.Timeout.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *ContainerTemplate) String() string { - if this == nil { - return "nil" + +func (m *SideInput) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForEnv := "[]EnvVar{" - for _, f := range this.Env { - repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Container != nil { + l = m.Container.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForEnv += "}" - repeatedStringForEnvFrom := "[]EnvFromSource{" - for _, f := range this.EnvFrom { - repeatedStringForEnvFrom += fmt.Sprintf("%v", f) + "," + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForEnvFrom += "}" - s := strings.Join([]string{`&ContainerTemplate{`, - `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, - `ImagePullPolicy:` + fmt.Sprintf("%v", this.ImagePullPolicy) + `,`, - `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "SecurityContext", "v1.SecurityContext", 1) + `,`, - `Env:` + repeatedStringForEnv + `,`, - `EnvFrom:` + repeatedStringForEnvFrom + `,`, - `}`, - }, "") - return s -} -func (this *DaemonTemplate) String() string { - if this == nil { - return "nil" + if m.Trigger != nil { + l = m.Trigger.Size() + n += 1 + l + 
sovGenerated(uint64(l)) } - s := strings.Join([]string{`&DaemonTemplate{`, - `AbstractPodTemplate:` + strings.Replace(strings.Replace(this.AbstractPodTemplate.String(), "AbstractPodTemplate", "AbstractPodTemplate", 1), `&`, ``, 1) + `,`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `ContainerTemplate:` + strings.Replace(this.ContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, - `InitContainerTemplate:` + strings.Replace(this.InitContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *Edge) String() string { - if this == nil { - return "nil" + +func (m *SideInputTrigger) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Edge{`, - `From:` + fmt.Sprintf("%v", this.From) + `,`, - `To:` + fmt.Sprintf("%v", this.To) + `,`, - `Conditions:` + strings.Replace(this.Conditions.String(), "ForwardConditions", "ForwardConditions", 1) + `,`, - `OnFull:` + valueToStringGenerated(this.OnFull) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Schedule) + n += 1 + l + sovGenerated(uint64(l)) + if m.Timezone != nil { + l = len(*m.Timezone) + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *FixedWindow) String() string { - if this == nil { - return "nil" + +func (m *SideInputsManagerTemplate) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&FixedWindow{`, - `Length:` + strings.Replace(fmt.Sprintf("%v", this.Length), "Duration", "v11.Duration", 1) + `,`, - `Streaming:` + fmt.Sprintf("%v", this.Streaming) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.AbstractPodTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.ContainerTemplate != nil { + l = m.ContainerTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.InitContainerTemplate != nil { + l = m.InitContainerTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this 
*ForwardConditions) String() string { - if this == nil { - return "nil" + +func (m *Sink) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ForwardConditions{`, - `Tags:` + strings.Replace(this.Tags.String(), "TagConditions", "TagConditions", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.AbstractSink.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Fallback != nil { + l = m.Fallback.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *Function) String() string { - if this == nil { - return "nil" + +func (m *SlidingWindow) Size() (n int) { + if m == nil { + return 0 } - keysForKWArgs := make([]string, 0, len(this.KWArgs)) - for k := range this.KWArgs { - keysForKWArgs = append(keysForKWArgs, k) + var l int + _ = l + if m.Length != nil { + l = m.Length.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForKWArgs) - mapStringForKWArgs := "map[string]string{" - for _, k := range keysForKWArgs { - mapStringForKWArgs += fmt.Sprintf("%v: %v,", k, this.KWArgs[k]) + if m.Slide != nil { + l = m.Slide.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForKWArgs += "}" - s := strings.Join([]string{`&Function{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Args:` + fmt.Sprintf("%v", this.Args) + `,`, - `KWArgs:` + mapStringForKWArgs + `,`, - `}`, - }, "") - return s + n += 2 + return n } -func (this *GSSAPI) String() string { - if this == nil { - return "nil" + +func (m *Source) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&GSSAPI{`, - `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, - `Realm:` + fmt.Sprintf("%v", this.Realm) + `,`, - `UsernameSecret:` + strings.Replace(fmt.Sprintf("%v", this.UsernameSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `AuthType:` + valueToStringGenerated(this.AuthType) + `,`, - `PasswordSecret:` + strings.Replace(fmt.Sprintf("%v", this.PasswordSecret), 
"SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `KeytabSecret:` + strings.Replace(fmt.Sprintf("%v", this.KeytabSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `KerberosConfigSecret:` + strings.Replace(fmt.Sprintf("%v", this.KerberosConfigSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *GeneratorSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.Generator != nil { + l = m.Generator.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&GeneratorSource{`, - `RPU:` + valueToStringGenerated(this.RPU) + `,`, - `Duration:` + strings.Replace(fmt.Sprintf("%v", this.Duration), "Duration", "v11.Duration", 1) + `,`, - `MsgSize:` + valueToStringGenerated(this.MsgSize) + `,`, - `KeyCount:` + valueToStringGenerated(this.KeyCount) + `,`, - `Value:` + valueToStringGenerated(this.Value) + `,`, - `Jitter:` + strings.Replace(fmt.Sprintf("%v", this.Jitter), "Duration", "v11.Duration", 1) + `,`, - `ValueBlob:` + valueToStringGenerated(this.ValueBlob) + `,`, - `}`, - }, "") - return s -} -func (this *GetDaemonDeploymentReq) String() string { - if this == nil { - return "nil" + if m.Kafka != nil { + l = m.Kafka.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForEnv := "[]EnvVar{" - for _, f := range this.Env { - repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + if m.HTTP != nil { + l = m.HTTP.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForEnv += "}" - s := strings.Join([]string{`&GetDaemonDeploymentReq{`, - `ISBSvcType:` + fmt.Sprintf("%v", this.ISBSvcType) + `,`, - `Image:` + fmt.Sprintf("%v", this.Image) + `,`, - `PullPolicy:` + fmt.Sprintf("%v", this.PullPolicy) + `,`, - `Env:` + repeatedStringForEnv + `,`, - `DefaultResources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DefaultResources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s 
-} -func (this *GetJetStreamServiceSpecReq) String() string { - if this == nil { - return "nil" + if m.Nats != nil { + l = m.Nats.Size() + n += 1 + l + sovGenerated(uint64(l)) } - keysForLabels := make([]string, 0, len(this.Labels)) - for k := range this.Labels { - keysForLabels = append(keysForLabels, k) + if m.UDTransformer != nil { + l = m.UDTransformer.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + if m.UDSource != nil { + l = m.UDSource.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForLabels += "}" - s := strings.Join([]string{`&GetJetStreamServiceSpecReq{`, - `Labels:` + mapStringForLabels + `,`, - `ClusterPort:` + fmt.Sprintf("%v", this.ClusterPort) + `,`, - `ClientPort:` + fmt.Sprintf("%v", this.ClientPort) + `,`, - `MonitorPort:` + fmt.Sprintf("%v", this.MonitorPort) + `,`, - `MetricsPort:` + fmt.Sprintf("%v", this.MetricsPort) + `,`, - `}`, - }, "") - return s -} -func (this *GetJetStreamStatefulSetSpecReq) String() string { - if this == nil { - return "nil" + if m.JetStream != nil { + l = m.JetStream.Size() + n += 1 + l + sovGenerated(uint64(l)) } - keysForLabels := make([]string, 0, len(this.Labels)) - for k := range this.Labels { - keysForLabels = append(keysForLabels, k) + if m.Serving != nil { + l = m.Serving.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + return n +} + +func (m *Status) Size() (n int) { + if m == nil { + return 0 } - mapStringForLabels += "}" - s := strings.Join([]string{`&GetJetStreamStatefulSetSpecReq{`, - `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, - `Labels:` + mapStringForLabels + `,`, - 
`NatsImage:` + fmt.Sprintf("%v", this.NatsImage) + `,`, - `MetricsExporterImage:` + fmt.Sprintf("%v", this.MetricsExporterImage) + `,`, - `ConfigReloaderImage:` + fmt.Sprintf("%v", this.ConfigReloaderImage) + `,`, - `ClusterPort:` + fmt.Sprintf("%v", this.ClusterPort) + `,`, - `ClientPort:` + fmt.Sprintf("%v", this.ClientPort) + `,`, - `MonitorPort:` + fmt.Sprintf("%v", this.MonitorPort) + `,`, - `MetricsPort:` + fmt.Sprintf("%v", this.MetricsPort) + `,`, - `ServerAuthSecretName:` + fmt.Sprintf("%v", this.ServerAuthSecretName) + `,`, - `ServerEncryptionSecretName:` + fmt.Sprintf("%v", this.ServerEncryptionSecretName) + `,`, - `ConfigMapName:` + fmt.Sprintf("%v", this.ConfigMapName) + `,`, - `PvcNameIfNeeded:` + fmt.Sprintf("%v", this.PvcNameIfNeeded) + `,`, - `StartCommand:` + fmt.Sprintf("%v", this.StartCommand) + `,`, - `DefaultResources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DefaultResources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *GetRedisServiceSpecReq) String() string { - if this == nil { - return "nil" + +func (m *TLS) Size() (n int) { + if m == nil { + return 0 } - keysForLabels := make([]string, 0, len(this.Labels)) - for k := range this.Labels { - keysForLabels = append(keysForLabels, k) + var l int + _ = l + n += 2 + if m.CACertSecret != nil { + l = m.CACertSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + if m.CertSecret != nil { + l = m.CertSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForLabels += "}" - s := strings.Join([]string{`&GetRedisServiceSpecReq{`, - `Labels:` + 
mapStringForLabels + `,`, - `RedisContainerPort:` + fmt.Sprintf("%v", this.RedisContainerPort) + `,`, - `SentinelContainerPort:` + fmt.Sprintf("%v", this.SentinelContainerPort) + `,`, - `}`, - }, "") - return s + if m.KeySecret != nil { + l = m.KeySecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *GetRedisStatefulSetSpecReq) String() string { - if this == nil { - return "nil" + +func (m *TagConditions) Size() (n int) { + if m == nil { + return 0 } - keysForLabels := make([]string, 0, len(this.Labels)) - for k := range this.Labels { - keysForLabels = append(keysForLabels, k) + var l int + _ = l + if m.Operator != nil { + l = len(*m.Operator) + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForLabels += "}" - s := strings.Join([]string{`&GetRedisStatefulSetSpecReq{`, - `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, - `Labels:` + mapStringForLabels + `,`, - `RedisImage:` + fmt.Sprintf("%v", this.RedisImage) + `,`, - `SentinelImage:` + fmt.Sprintf("%v", this.SentinelImage) + `,`, - `MetricsExporterImage:` + fmt.Sprintf("%v", this.MetricsExporterImage) + `,`, - `InitContainerImage:` + fmt.Sprintf("%v", this.InitContainerImage) + `,`, - `RedisContainerPort:` + fmt.Sprintf("%v", this.RedisContainerPort) + `,`, - `SentinelContainerPort:` + fmt.Sprintf("%v", this.SentinelContainerPort) + `,`, - `RedisMetricsContainerPort:` + fmt.Sprintf("%v", this.RedisMetricsContainerPort) + `,`, - `CredentialSecretName:` + fmt.Sprintf("%v", this.CredentialSecretName) + `,`, - `TLSEnabled:` + fmt.Sprintf("%v", this.TLSEnabled) + `,`, - `PvcNameIfNeeded:` + fmt.Sprintf("%v", this.PvcNameIfNeeded) + `,`, - `ConfConfigMapName:` + 
fmt.Sprintf("%v", this.ConfConfigMapName) + `,`, - `ScriptsConfigMapName:` + fmt.Sprintf("%v", this.ScriptsConfigMapName) + `,`, - `HealthConfigMapName:` + fmt.Sprintf("%v", this.HealthConfigMapName) + `,`, - `DefaultResources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DefaultResources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *GetSideInputDeploymentReq) String() string { - if this == nil { - return "nil" + +func (m *Templates) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForEnv := "[]EnvVar{" - for _, f := range this.Env { - repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + var l int + _ = l + if m.DaemonTemplate != nil { + l = m.DaemonTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForEnv += "}" - s := strings.Join([]string{`&GetSideInputDeploymentReq{`, - `ISBSvcType:` + fmt.Sprintf("%v", this.ISBSvcType) + `,`, - `Image:` + fmt.Sprintf("%v", this.Image) + `,`, - `PullPolicy:` + fmt.Sprintf("%v", this.PullPolicy) + `,`, - `Env:` + repeatedStringForEnv + `,`, - `DefaultResources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DefaultResources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *GetVertexPodSpecReq) String() string { - if this == nil { - return "nil" + if m.JobTemplate != nil { + l = m.JobTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForEnv := "[]EnvVar{" - for _, f := range this.Env { - repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + if m.SideInputsManagerTemplate != nil { + l = m.SideInputsManagerTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForEnv += "}" - s := strings.Join([]string{`&GetVertexPodSpecReq{`, - `ISBSvcType:` + fmt.Sprintf("%v", this.ISBSvcType) + `,`, - `Image:` + fmt.Sprintf("%v", this.Image) + `,`, - `PullPolicy:` + fmt.Sprintf("%v", this.PullPolicy) 
+ `,`, - `Env:` + repeatedStringForEnv + `,`, - `SideInputsStoreName:` + fmt.Sprintf("%v", this.SideInputsStoreName) + `,`, - `ServingSourceStreamName:` + fmt.Sprintf("%v", this.ServingSourceStreamName) + `,`, - `PipelineSpec:` + strings.Replace(strings.Replace(this.PipelineSpec.String(), "PipelineSpec", "PipelineSpec", 1), `&`, ``, 1) + `,`, - `DefaultResources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DefaultResources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *GroupBy) String() string { - if this == nil { - return "nil" + if m.VertexTemplate != nil { + l = m.VertexTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&GroupBy{`, - `Window:` + strings.Replace(strings.Replace(this.Window.String(), "Window", "Window", 1), `&`, ``, 1) + `,`, - `Keyed:` + fmt.Sprintf("%v", this.Keyed) + `,`, - `AllowedLateness:` + strings.Replace(fmt.Sprintf("%v", this.AllowedLateness), "Duration", "v11.Duration", 1) + `,`, - `Storage:` + strings.Replace(this.Storage.String(), "PBQStorage", "PBQStorage", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *HTTPSource) String() string { - if this == nil { - return "nil" + +func (m *Transformer) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&HTTPSource{`, - `Auth:` + strings.Replace(this.Auth.String(), "Authorization", "Authorization", 1) + `,`, - `Service:` + fmt.Sprintf("%v", this.Service) + `,`, - `}`, - }, "") - return s -} -func (this *IdleSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&IdleSource{`, - `Threshold:` + strings.Replace(fmt.Sprintf("%v", this.Threshold), "Duration", "v11.Duration", 1) + `,`, - `StepInterval:` + 
strings.Replace(fmt.Sprintf("%v", this.StepInterval), "Duration", "v11.Duration", 1) + `,`, - `IncrementBy:` + strings.Replace(fmt.Sprintf("%v", this.IncrementBy), "Duration", "v11.Duration", 1) + `,`, - `}`, - }, "") - return s -} -func (this *InterStepBufferService) String() string { - if this == nil { - return "nil" + if len(m.KWArgs) > 0 { + for k, v := range m.KWArgs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&InterStepBufferService{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "InterStepBufferServiceSpec", "InterStepBufferServiceSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "InterStepBufferServiceStatus", "InterStepBufferServiceStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *InterStepBufferServiceList) String() string { - if this == nil { - return "nil" + +func (m *UDF) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForItems := "[]InterStepBufferService{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "InterStepBufferService", "InterStepBufferService", 1), `&`, ``, 1) + "," + var l int + _ = l + if m.Container != nil { + l = m.Container.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForItems += "}" - s := strings.Join([]string{`&InterStepBufferServiceList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *InterStepBufferServiceSpec) String() string { - if this == nil { - return "nil" 
+ if m.Builtin != nil { + l = m.Builtin.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&InterStepBufferServiceSpec{`, - `Redis:` + strings.Replace(this.Redis.String(), "RedisBufferService", "RedisBufferService", 1) + `,`, - `JetStream:` + strings.Replace(this.JetStream.String(), "JetStreamBufferService", "JetStreamBufferService", 1) + `,`, - `}`, - }, "") - return s -} -func (this *InterStepBufferServiceStatus) String() string { - if this == nil { - return "nil" + if m.GroupBy != nil { + l = m.GroupBy.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&InterStepBufferServiceStatus{`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "Status", "Status", 1), `&`, ``, 1) + `,`, - `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Config:` + strings.Replace(strings.Replace(this.Config.String(), "BufferServiceConfig", "BufferServiceConfig", 1), `&`, ``, 1) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `}`, - }, "") - return s + return n } -func (this *JetStreamBufferService) String() string { - if this == nil { - return "nil" + +func (m *UDSink) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&JetStreamBufferService{`, - `Version:` + fmt.Sprintf("%v", this.Version) + `,`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `ContainerTemplate:` + strings.Replace(this.ContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, - `ReloaderContainerTemplate:` + strings.Replace(this.ReloaderContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, - `MetricsContainerTemplate:` + strings.Replace(this.MetricsContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, - `Persistence:` + strings.Replace(this.Persistence.String(), "PersistenceStrategy", 
"PersistenceStrategy", 1) + `,`, - `AbstractPodTemplate:` + strings.Replace(strings.Replace(this.AbstractPodTemplate.String(), "AbstractPodTemplate", "AbstractPodTemplate", 1), `&`, ``, 1) + `,`, - `Settings:` + valueToStringGenerated(this.Settings) + `,`, - `StartArgs:` + fmt.Sprintf("%v", this.StartArgs) + `,`, - `BufferConfig:` + valueToStringGenerated(this.BufferConfig) + `,`, - `Encryption:` + fmt.Sprintf("%v", this.Encryption) + `,`, - `TLS:` + fmt.Sprintf("%v", this.TLS) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.Container.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *JetStreamConfig) String() string { - if this == nil { - return "nil" + +func (m *UDSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&JetStreamConfig{`, - `URL:` + fmt.Sprintf("%v", this.URL) + `,`, - `Auth:` + strings.Replace(this.Auth.String(), "NatsAuth", "NatsAuth", 1) + `,`, - `StreamConfig:` + fmt.Sprintf("%v", this.StreamConfig) + `,`, - `TLSEnabled:` + fmt.Sprintf("%v", this.TLSEnabled) + `,`, - `}`, - }, "") - return s -} -func (this *JetStreamSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.Container != nil { + l = m.Container.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&JetStreamSource{`, - `URL:` + fmt.Sprintf("%v", this.URL) + `,`, - `Stream:` + fmt.Sprintf("%v", this.Stream) + `,`, - `TLS:` + strings.Replace(this.TLS.String(), "TLS", "TLS", 1) + `,`, - `Auth:` + strings.Replace(this.Auth.String(), "NatsAuth", "NatsAuth", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *JobTemplate) String() string { - if this == nil { - return "nil" + +func (m *UDTransformer) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&JobTemplate{`, - `AbstractPodTemplate:` + strings.Replace(strings.Replace(this.AbstractPodTemplate.String(), "AbstractPodTemplate", "AbstractPodTemplate", 1), `&`, ``, 1) + `,`, - 
`ContainerTemplate:` + strings.Replace(this.ContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, - `TTLSecondsAfterFinished:` + valueToStringGenerated(this.TTLSecondsAfterFinished) + `,`, - `BackoffLimit:` + valueToStringGenerated(this.BackoffLimit) + `,`, - `}`, - }, "") - return s -} -func (this *KafkaSink) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.Container != nil { + l = m.Container.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&KafkaSink{`, - `Brokers:` + fmt.Sprintf("%v", this.Brokers) + `,`, - `Topic:` + fmt.Sprintf("%v", this.Topic) + `,`, - `TLS:` + strings.Replace(this.TLS.String(), "TLS", "TLS", 1) + `,`, - `Config:` + fmt.Sprintf("%v", this.Config) + `,`, - `SASL:` + strings.Replace(this.SASL.String(), "SASL", "SASL", 1) + `,`, - `}`, - }, "") - return s -} -func (this *KafkaSource) String() string { - if this == nil { - return "nil" + if m.Builtin != nil { + l = m.Builtin.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&KafkaSource{`, - `Brokers:` + fmt.Sprintf("%v", this.Brokers) + `,`, - `Topic:` + fmt.Sprintf("%v", this.Topic) + `,`, - `ConsumerGroupName:` + fmt.Sprintf("%v", this.ConsumerGroupName) + `,`, - `TLS:` + strings.Replace(this.TLS.String(), "TLS", "TLS", 1) + `,`, - `Config:` + fmt.Sprintf("%v", this.Config) + `,`, - `SASL:` + strings.Replace(this.SASL.String(), "SASL", "SASL", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *Lifecycle) String() string { - if this == nil { - return "nil" + +func (m *Vertex) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Lifecycle{`, - `DeleteGracePeriodSeconds:` + valueToStringGenerated(this.DeleteGracePeriodSeconds) + `,`, - `DesiredPhase:` + fmt.Sprintf("%v", this.DesiredPhase) + `,`, - `PauseGracePeriodSeconds:` + valueToStringGenerated(this.PauseGracePeriodSeconds) + `,`, - `}`, - }, "") - return s -} -func (this *Log) String() string 
{ - if this == nil { - return "nil" + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VertexInstance) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Log{`, - `}`, - }, "") - return s + var l int + _ = l + if m.Vertex != nil { + l = m.Vertex.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Replica)) + return n } -func (this *Metadata) String() string { - if this == nil { - return "nil" + +func (m *VertexLimits) Size() (n int) { + if m == nil { + return 0 } - keysForAnnotations := make([]string, 0, len(this.Annotations)) - for k := range this.Annotations { - keysForAnnotations = append(keysForAnnotations, k) + var l int + _ = l + if m.ReadBatchSize != nil { + n += 1 + sovGenerated(uint64(*m.ReadBatchSize)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - mapStringForAnnotations := "map[string]string{" - for _, k := range keysForAnnotations { - mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + if m.ReadTimeout != nil { + l = m.ReadTimeout.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForAnnotations += "}" - keysForLabels := make([]string, 0, len(this.Labels)) - for k := range this.Labels { - keysForLabels = append(keysForLabels, k) + if m.BufferMaxLength != nil { + n += 1 + sovGenerated(uint64(*m.BufferMaxLength)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + if m.BufferUsageLimit != nil { + n += 1 + sovGenerated(uint64(*m.BufferUsageLimit)) } - mapStringForLabels += "}" - s := strings.Join([]string{`&Metadata{`, - `Annotations:` + 
mapStringForAnnotations + `,`, - `Labels:` + mapStringForLabels + `,`, - `}`, - }, "") - return s + return n } -func (this *NativeRedis) String() string { - if this == nil { - return "nil" + +func (m *VertexList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&NativeRedis{`, - `Version:` + fmt.Sprintf("%v", this.Version) + `,`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `RedisContainerTemplate:` + strings.Replace(this.RedisContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, - `SentinelContainerTemplate:` + strings.Replace(this.SentinelContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, - `MetricsContainerTemplate:` + strings.Replace(this.MetricsContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, - `InitContainerTemplate:` + strings.Replace(this.InitContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, - `Persistence:` + strings.Replace(this.Persistence.String(), "PersistenceStrategy", "PersistenceStrategy", 1) + `,`, - `AbstractPodTemplate:` + strings.Replace(strings.Replace(this.AbstractPodTemplate.String(), "AbstractPodTemplate", "AbstractPodTemplate", 1), `&`, ``, 1) + `,`, - `Settings:` + strings.Replace(this.Settings.String(), "RedisSettings", "RedisSettings", 1) + `,`, - `}`, - }, "") - return s -} -func (this *NatsAuth) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&NatsAuth{`, - `Basic:` + strings.Replace(this.Basic.String(), "BasicAuth", "BasicAuth", 1) + `,`, - `Token:` + strings.Replace(fmt.Sprintf("%v", this.Token), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `NKey:` + strings.Replace(fmt.Sprintf("%v", this.NKey), "SecretKeySelector", 
"v1.SecretKeySelector", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *NatsSource) String() string { - if this == nil { - return "nil" + +func (m *VertexSpec) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&NatsSource{`, - `URL:` + fmt.Sprintf("%v", this.URL) + `,`, - `Subject:` + fmt.Sprintf("%v", this.Subject) + `,`, - `Queue:` + fmt.Sprintf("%v", this.Queue) + `,`, - `TLS:` + strings.Replace(this.TLS.String(), "TLS", "TLS", 1) + `,`, - `Auth:` + strings.Replace(this.Auth.String(), "NatsAuth", "NatsAuth", 1) + `,`, - `}`, - }, "") - return s -} -func (this *NoStore) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.AbstractVertex.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PipelineName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.InterStepBufferServiceName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) } - s := strings.Join([]string{`&NoStore{`, - `}`, - }, "") - return s -} -func (this *PBQStorage) String() string { - if this == nil { - return "nil" + if len(m.FromEdges) > 0 { + for _, e := range m.FromEdges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&PBQStorage{`, - `PersistentVolumeClaim:` + strings.Replace(this.PersistentVolumeClaim.String(), "PersistenceStrategy", "PersistenceStrategy", 1) + `,`, - `EmptyDir:` + strings.Replace(fmt.Sprintf("%v", this.EmptyDir), "EmptyDirVolumeSource", "v1.EmptyDirVolumeSource", 1) + `,`, - `NoStore:` + strings.Replace(this.NoStore.String(), "NoStore", "NoStore", 1) + `,`, - `}`, - }, "") - return s -} -func (this *PersistenceStrategy) String() string { - if this == nil { - return "nil" + if len(m.ToEdges) > 0 { + for _, e := range m.ToEdges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&PersistenceStrategy{`, - `StorageClassName:` + 
valueToStringGenerated(this.StorageClassName) + `,`, - `AccessMode:` + valueToStringGenerated(this.AccessMode) + `,`, - `VolumeSize:` + strings.Replace(fmt.Sprintf("%v", this.VolumeSize), "Quantity", "resource.Quantity", 1) + `,`, - `}`, - }, "") - return s + l = m.Watermark.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *Pipeline) String() string { - if this == nil { - return "nil" + +func (m *VertexStatus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Pipeline{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PipelineSpec", "PipelineSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PipelineStatus", "PipelineStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Replicas)) + l = len(m.Selector) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastScaledAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + return n } -func (this *PipelineLimits) String() string { - if this == nil { - return "nil" + +func (m *VertexTemplate) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PipelineLimits{`, - `ReadBatchSize:` + valueToStringGenerated(this.ReadBatchSize) + `,`, - `BufferMaxLength:` + valueToStringGenerated(this.BufferMaxLength) + `,`, - `BufferUsageLimit:` + valueToStringGenerated(this.BufferUsageLimit) + `,`, - `ReadTimeout:` + strings.Replace(fmt.Sprintf("%v", this.ReadTimeout), "Duration", "v11.Duration", 1) + `,`, - `}`, - }, "") - return s 
-} -func (this *PipelineList) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.AbstractPodTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.ContainerTemplate != nil { + l = m.ContainerTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForItems := "[]Pipeline{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Pipeline", "Pipeline", 1), `&`, ``, 1) + "," + if m.InitContainerTemplate != nil { + l = m.InitContainerTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForItems += "}" - s := strings.Join([]string{`&PipelineList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s + return n } -func (this *PipelineSpec) String() string { - if this == nil { - return "nil" + +func (m *Watermark) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForVertices := "[]AbstractVertex{" - for _, f := range this.Vertices { - repeatedStringForVertices += strings.Replace(strings.Replace(f.String(), "AbstractVertex", "AbstractVertex", 1), `&`, ``, 1) + "," + var l int + _ = l + n += 2 + if m.MaxDelay != nil { + l = m.MaxDelay.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForVertices += "}" - repeatedStringForEdges := "[]Edge{" - for _, f := range this.Edges { - repeatedStringForEdges += strings.Replace(strings.Replace(f.String(), "Edge", "Edge", 1), `&`, ``, 1) + "," + if m.IdleSource != nil { + l = m.IdleSource.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForEdges += "}" - repeatedStringForSideInputs := "[]SideInput{" - for _, f := range this.SideInputs { - repeatedStringForSideInputs += strings.Replace(strings.Replace(f.String(), "SideInput", "SideInput", 1), `&`, ``, 1) + "," + return n +} + +func (m *Window) Size() (n int) { + if m == nil { + 
return 0 } - repeatedStringForSideInputs += "}" - s := strings.Join([]string{`&PipelineSpec{`, - `InterStepBufferServiceName:` + fmt.Sprintf("%v", this.InterStepBufferServiceName) + `,`, - `Vertices:` + repeatedStringForVertices + `,`, - `Edges:` + repeatedStringForEdges + `,`, - `Lifecycle:` + strings.Replace(strings.Replace(this.Lifecycle.String(), "Lifecycle", "Lifecycle", 1), `&`, ``, 1) + `,`, - `Limits:` + strings.Replace(this.Limits.String(), "PipelineLimits", "PipelineLimits", 1) + `,`, - `Watermark:` + strings.Replace(strings.Replace(this.Watermark.String(), "Watermark", "Watermark", 1), `&`, ``, 1) + `,`, - `Templates:` + strings.Replace(this.Templates.String(), "Templates", "Templates", 1) + `,`, - `SideInputs:` + repeatedStringForSideInputs + `,`, - `}`, - }, "") - return s + var l int + _ = l + if m.Fixed != nil { + l = m.Fixed.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Sliding != nil { + l = m.Sliding.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Session != nil { + l = m.Session.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *PipelineStatus) String() string { + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AbstractPodTemplate) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&PipelineStatus{`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "Status", "Status", 1), `&`, ``, 1) + `,`, - `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdated:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdated), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, - `VertexCount:` + valueToStringGenerated(this.VertexCount) + `,`, - `SourceCount:` + valueToStringGenerated(this.SourceCount) + `,`, - `SinkCount:` + 
valueToStringGenerated(this.SinkCount) + `,`, - `UDFCount:` + valueToStringGenerated(this.UDFCount) + `,`, - `MapUDFCount:` + valueToStringGenerated(this.MapUDFCount) + `,`, - `ReduceUDFCount:` + valueToStringGenerated(this.ReduceUDFCount) + `,`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + repeatedStringForTolerations := "[]Toleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," + } + repeatedStringForTolerations += "}" + repeatedStringForImagePullSecrets := "[]LocalObjectReference{" + for _, f := range this.ImagePullSecrets { + repeatedStringForImagePullSecrets += fmt.Sprintf("%v", f) + "," + } + repeatedStringForImagePullSecrets += "}" + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + s := strings.Join([]string{`&AbstractPodTemplate{`, + `Metadata:` + strings.Replace(this.Metadata.String(), "Metadata", "Metadata", 1) + `,`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, + `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "PodSecurityContext", "v1.PodSecurityContext", 1) + `,`, + `ImagePullSecrets:` + repeatedStringForImagePullSecrets + `,`, + `PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`, + `Priority:` + valueToStringGenerated(this.Priority) + `,`, + `Affinity:` + strings.Replace(fmt.Sprintf("%v", this.Affinity), "Affinity", "v1.Affinity", 1) + `,`, + `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, + `RuntimeClassName:` + 
valueToStringGenerated(this.RuntimeClassName) + `,`, + `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, + `DNSPolicy:` + fmt.Sprintf("%v", this.DNSPolicy) + `,`, + `DNSConfig:` + strings.Replace(fmt.Sprintf("%v", this.DNSConfig), "PodDNSConfig", "v1.PodDNSConfig", 1) + `,`, `}`, }, "") return s } -func (this *RedisBufferService) String() string { +func (this *AbstractSink) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&RedisBufferService{`, - `Native:` + strings.Replace(this.Native.String(), "NativeRedis", "NativeRedis", 1) + `,`, - `External:` + strings.Replace(this.External.String(), "RedisConfig", "RedisConfig", 1) + `,`, + s := strings.Join([]string{`&AbstractSink{`, + `Log:` + strings.Replace(this.Log.String(), "Log", "Log", 1) + `,`, + `Kafka:` + strings.Replace(this.Kafka.String(), "KafkaSink", "KafkaSink", 1) + `,`, + `Blackhole:` + strings.Replace(this.Blackhole.String(), "Blackhole", "Blackhole", 1) + `,`, + `UDSink:` + strings.Replace(this.UDSink.String(), "UDSink", "UDSink", 1) + `,`, `}`, }, "") return s } -func (this *RedisConfig) String() string { +func (this *AbstractVertex) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&RedisConfig{`, - `URL:` + fmt.Sprintf("%v", this.URL) + `,`, - `SentinelURL:` + fmt.Sprintf("%v", this.SentinelURL) + `,`, - `MasterName:` + fmt.Sprintf("%v", this.MasterName) + `,`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `Password:` + strings.Replace(fmt.Sprintf("%v", this.Password), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `SentinelPassword:` + strings.Replace(fmt.Sprintf("%v", this.SentinelPassword), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + repeatedStringForVolumes := "[]Volume{" + for _, f := range this.Volumes { + repeatedStringForVolumes += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumes += "}" + repeatedStringForInitContainers := "[]Container{" + for _, f := 
range this.InitContainers { + repeatedStringForInitContainers += fmt.Sprintf("%v", f) + "," + } + repeatedStringForInitContainers += "}" + repeatedStringForSidecars := "[]Container{" + for _, f := range this.Sidecars { + repeatedStringForSidecars += fmt.Sprintf("%v", f) + "," + } + repeatedStringForSidecars += "}" + s := strings.Join([]string{`&AbstractVertex{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Source:` + strings.Replace(this.Source.String(), "Source", "Source", 1) + `,`, + `Sink:` + strings.Replace(this.Sink.String(), "Sink", "Sink", 1) + `,`, + `UDF:` + strings.Replace(this.UDF.String(), "UDF", "UDF", 1) + `,`, + `ContainerTemplate:` + strings.Replace(this.ContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, + `InitContainerTemplate:` + strings.Replace(this.InitContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, + `AbstractPodTemplate:` + strings.Replace(strings.Replace(this.AbstractPodTemplate.String(), "AbstractPodTemplate", "AbstractPodTemplate", 1), `&`, ``, 1) + `,`, + `Volumes:` + repeatedStringForVolumes + `,`, + `Limits:` + strings.Replace(this.Limits.String(), "VertexLimits", "VertexLimits", 1) + `,`, + `Scale:` + strings.Replace(strings.Replace(this.Scale.String(), "Scale", "Scale", 1), `&`, ``, 1) + `,`, + `InitContainers:` + repeatedStringForInitContainers + `,`, + `Sidecars:` + repeatedStringForSidecars + `,`, + `Partitions:` + valueToStringGenerated(this.Partitions) + `,`, + `SideInputs:` + fmt.Sprintf("%v", this.SideInputs) + `,`, + `SideInputsContainerTemplate:` + strings.Replace(this.SideInputsContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, `}`, }, "") return s } -func (this *RedisSettings) String() string { +func (this *Authorization) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&RedisSettings{`, - `Redis:` + fmt.Sprintf("%v", this.Redis) + `,`, - `Master:` + fmt.Sprintf("%v", this.Master) + `,`, - 
`Replica:` + fmt.Sprintf("%v", this.Replica) + `,`, - `Sentinel:` + fmt.Sprintf("%v", this.Sentinel) + `,`, + s := strings.Join([]string{`&Authorization{`, + `Token:` + strings.Replace(fmt.Sprintf("%v", this.Token), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, `}`, }, "") return s } -func (this *SASL) String() string { +func (this *BasicAuth) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&SASL{`, - `Mechanism:` + valueToStringGenerated(this.Mechanism) + `,`, - `GSSAPI:` + strings.Replace(this.GSSAPI.String(), "GSSAPI", "GSSAPI", 1) + `,`, - `Plain:` + strings.Replace(this.Plain.String(), "SASLPlain", "SASLPlain", 1) + `,`, - `SCRAMSHA256:` + strings.Replace(this.SCRAMSHA256.String(), "SASLPlain", "SASLPlain", 1) + `,`, - `SCRAMSHA512:` + strings.Replace(this.SCRAMSHA512.String(), "SASLPlain", "SASLPlain", 1) + `,`, + s := strings.Join([]string{`&BasicAuth{`, + `User:` + strings.Replace(fmt.Sprintf("%v", this.User), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `Password:` + strings.Replace(fmt.Sprintf("%v", this.Password), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, `}`, }, "") return s } -func (this *SASLPlain) String() string { +func (this *Blackhole) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&SASLPlain{`, - `UserSecret:` + strings.Replace(fmt.Sprintf("%v", this.UserSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `PasswordSecret:` + strings.Replace(fmt.Sprintf("%v", this.PasswordSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `Handshake:` + fmt.Sprintf("%v", this.Handshake) + `,`, + s := strings.Join([]string{`&Blackhole{`, `}`, }, "") return s } -func (this *Scale) String() string { +func (this *BufferServiceConfig) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&Scale{`, - `Disabled:` + fmt.Sprintf("%v", this.Disabled) + `,`, - `Min:` + valueToStringGenerated(this.Min) + `,`, - `Max:` + 
valueToStringGenerated(this.Max) + `,`, - `LookbackSeconds:` + valueToStringGenerated(this.LookbackSeconds) + `,`, - `DeprecatedCooldownSeconds:` + valueToStringGenerated(this.DeprecatedCooldownSeconds) + `,`, - `ZeroReplicaSleepSeconds:` + valueToStringGenerated(this.ZeroReplicaSleepSeconds) + `,`, - `TargetProcessingSeconds:` + valueToStringGenerated(this.TargetProcessingSeconds) + `,`, - `TargetBufferAvailability:` + valueToStringGenerated(this.TargetBufferAvailability) + `,`, - `ReplicasPerScale:` + valueToStringGenerated(this.ReplicasPerScale) + `,`, - `ScaleUpCooldownSeconds:` + valueToStringGenerated(this.ScaleUpCooldownSeconds) + `,`, - `ScaleDownCooldownSeconds:` + valueToStringGenerated(this.ScaleDownCooldownSeconds) + `,`, + s := strings.Join([]string{`&BufferServiceConfig{`, + `Redis:` + strings.Replace(this.Redis.String(), "RedisConfig", "RedisConfig", 1) + `,`, + `JetStream:` + strings.Replace(this.JetStream.String(), "JetStreamConfig", "JetStreamConfig", 1) + `,`, `}`, }, "") return s } -func (this *ServingSource) String() string { +func (this *CombinedEdge) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&ServingSource{`, - `Auth:` + strings.Replace(this.Auth.String(), "Authorization", "Authorization", 1) + `,`, - `Service:` + fmt.Sprintf("%v", this.Service) + `,`, - `MsgIDHeaderKey:` + valueToStringGenerated(this.MsgIDHeaderKey) + `,`, - `Store:` + strings.Replace(this.Store.String(), "ServingStore", "ServingStore", 1) + `,`, + s := strings.Join([]string{`&CombinedEdge{`, + `Edge:` + strings.Replace(strings.Replace(this.Edge.String(), "Edge", "Edge", 1), `&`, ``, 1) + `,`, + `FromVertexType:` + fmt.Sprintf("%v", this.FromVertexType) + `,`, + `FromVertexPartitionCount:` + valueToStringGenerated(this.FromVertexPartitionCount) + `,`, + `FromVertexLimits:` + strings.Replace(this.FromVertexLimits.String(), "VertexLimits", "VertexLimits", 1) + `,`, + `ToVertexType:` + fmt.Sprintf("%v", this.ToVertexType) + `,`, + 
`ToVertexPartitionCount:` + valueToStringGenerated(this.ToVertexPartitionCount) + `,`, + `ToVertexLimits:` + strings.Replace(this.ToVertexLimits.String(), "VertexLimits", "VertexLimits", 1) + `,`, `}`, }, "") return s } -func (this *ServingStore) String() string { +func (this *Container) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&ServingStore{`, - `URL:` + valueToStringGenerated(this.URL) + `,`, - `TTL:` + strings.Replace(fmt.Sprintf("%v", this.TTL), "Duration", "v11.Duration", 1) + `,`, + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + repeatedStringForEnvFrom := "[]EnvFromSource{" + for _, f := range this.EnvFrom { + repeatedStringForEnvFrom += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnvFrom += "}" + repeatedStringForVolumeMounts := "[]VolumeMount{" + for _, f := range this.VolumeMounts { + repeatedStringForVolumeMounts += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumeMounts += "}" + s := strings.Join([]string{`&Container{`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `EnvFrom:` + repeatedStringForEnvFrom + `,`, + `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, + `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "SecurityContext", "v1.SecurityContext", 1) + `,`, + `ImagePullPolicy:` + valueToStringGenerated(this.ImagePullPolicy) + `,`, `}`, }, "") return s } -func (this *SessionWindow) String() string { +func (this *ContainerTemplate) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&SessionWindow{`, - `Timeout:` + 
strings.Replace(fmt.Sprintf("%v", this.Timeout), "Duration", "v11.Duration", 1) + `,`, - `}`, - }, "") - return s -} -func (this *SideInput) String() string { - if this == nil { - return "nil" - } - repeatedStringForVolumes := "[]Volume{" - for _, f := range this.Volumes { - repeatedStringForVolumes += fmt.Sprintf("%v", f) + "," + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," } - repeatedStringForVolumes += "}" - s := strings.Join([]string{`&SideInput{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Container:` + strings.Replace(this.Container.String(), "Container", "Container", 1) + `,`, - `Volumes:` + repeatedStringForVolumes + `,`, - `Trigger:` + strings.Replace(this.Trigger.String(), "SideInputTrigger", "SideInputTrigger", 1) + `,`, - `}`, - }, "") - return s -} -func (this *SideInputTrigger) String() string { - if this == nil { - return "nil" + repeatedStringForEnv += "}" + repeatedStringForEnvFrom := "[]EnvFromSource{" + for _, f := range this.EnvFrom { + repeatedStringForEnvFrom += fmt.Sprintf("%v", f) + "," } - s := strings.Join([]string{`&SideInputTrigger{`, - `Schedule:` + fmt.Sprintf("%v", this.Schedule) + `,`, - `Timezone:` + valueToStringGenerated(this.Timezone) + `,`, + repeatedStringForEnvFrom += "}" + s := strings.Join([]string{`&ContainerTemplate{`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, + `ImagePullPolicy:` + fmt.Sprintf("%v", this.ImagePullPolicy) + `,`, + `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "SecurityContext", "v1.SecurityContext", 1) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `EnvFrom:` + repeatedStringForEnvFrom + `,`, `}`, }, "") return s } -func (this *SideInputsManagerTemplate) String() string { +func (this *DaemonTemplate) String() string { if this == nil { return "nil" } - s := 
strings.Join([]string{`&SideInputsManagerTemplate{`, + s := strings.Join([]string{`&DaemonTemplate{`, `AbstractPodTemplate:` + strings.Replace(strings.Replace(this.AbstractPodTemplate.String(), "AbstractPodTemplate", "AbstractPodTemplate", 1), `&`, ``, 1) + `,`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, `ContainerTemplate:` + strings.Replace(this.ContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, `InitContainerTemplate:` + strings.Replace(this.InitContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, `}`, }, "") return s } -func (this *Sink) String() string { +func (this *Edge) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&Sink{`, - `AbstractSink:` + strings.Replace(strings.Replace(this.AbstractSink.String(), "AbstractSink", "AbstractSink", 1), `&`, ``, 1) + `,`, - `Fallback:` + strings.Replace(this.Fallback.String(), "AbstractSink", "AbstractSink", 1) + `,`, + s := strings.Join([]string{`&Edge{`, + `From:` + fmt.Sprintf("%v", this.From) + `,`, + `To:` + fmt.Sprintf("%v", this.To) + `,`, + `Conditions:` + strings.Replace(this.Conditions.String(), "ForwardConditions", "ForwardConditions", 1) + `,`, + `OnFull:` + valueToStringGenerated(this.OnFull) + `,`, `}`, }, "") return s } -func (this *SlidingWindow) String() string { +func (this *FixedWindow) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&SlidingWindow{`, + s := strings.Join([]string{`&FixedWindow{`, `Length:` + strings.Replace(fmt.Sprintf("%v", this.Length), "Duration", "v11.Duration", 1) + `,`, - `Slide:` + strings.Replace(fmt.Sprintf("%v", this.Slide), "Duration", "v11.Duration", 1) + `,`, `Streaming:` + fmt.Sprintf("%v", this.Streaming) + `,`, `}`, }, "") return s } -func (this *Source) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Source{`, - `Generator:` + strings.Replace(this.Generator.String(), "GeneratorSource", 
"GeneratorSource", 1) + `,`, - `Kafka:` + strings.Replace(this.Kafka.String(), "KafkaSource", "KafkaSource", 1) + `,`, - `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPSource", "HTTPSource", 1) + `,`, - `Nats:` + strings.Replace(this.Nats.String(), "NatsSource", "NatsSource", 1) + `,`, - `UDTransformer:` + strings.Replace(this.UDTransformer.String(), "UDTransformer", "UDTransformer", 1) + `,`, - `UDSource:` + strings.Replace(this.UDSource.String(), "UDSource", "UDSource", 1) + `,`, - `JetStream:` + strings.Replace(this.JetStream.String(), "JetStreamSource", "JetStreamSource", 1) + `,`, - `Serving:` + strings.Replace(this.Serving.String(), "ServingSource", "ServingSource", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Status) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]Condition{" - for _, f := range this.Conditions { - repeatedStringForConditions += fmt.Sprintf("%v", f) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&Status{`, - `Conditions:` + repeatedStringForConditions + `,`, - `}`, - }, "") - return s -} -func (this *TLS) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TLS{`, - `InsecureSkipVerify:` + fmt.Sprintf("%v", this.InsecureSkipVerify) + `,`, - `CACertSecret:` + strings.Replace(fmt.Sprintf("%v", this.CACertSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `CertSecret:` + strings.Replace(fmt.Sprintf("%v", this.CertSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `KeySecret:` + strings.Replace(fmt.Sprintf("%v", this.KeySecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *TagConditions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TagConditions{`, - `Operator:` + valueToStringGenerated(this.Operator) + `,`, - `Values:` + fmt.Sprintf("%v", this.Values) + `,`, - `}`, - }, "") - return s -} -func (this 
*Templates) String() string { +func (this *ForwardConditions) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&Templates{`, - `DaemonTemplate:` + strings.Replace(this.DaemonTemplate.String(), "DaemonTemplate", "DaemonTemplate", 1) + `,`, - `JobTemplate:` + strings.Replace(this.JobTemplate.String(), "JobTemplate", "JobTemplate", 1) + `,`, - `SideInputsManagerTemplate:` + strings.Replace(this.SideInputsManagerTemplate.String(), "SideInputsManagerTemplate", "SideInputsManagerTemplate", 1) + `,`, - `VertexTemplate:` + strings.Replace(this.VertexTemplate.String(), "VertexTemplate", "VertexTemplate", 1) + `,`, + s := strings.Join([]string{`&ForwardConditions{`, + `Tags:` + strings.Replace(this.Tags.String(), "TagConditions", "TagConditions", 1) + `,`, `}`, }, "") return s } -func (this *Transformer) String() string { +func (this *Function) String() string { if this == nil { return "nil" } @@ -11418,7 +11404,7 @@ func (this *Transformer) String() string { mapStringForKWArgs += fmt.Sprintf("%v: %v,", k, this.KWArgs[k]) } mapStringForKWArgs += "}" - s := strings.Join([]string{`&Transformer{`, + s := strings.Join([]string{`&Function{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Args:` + fmt.Sprintf("%v", this.Args) + `,`, `KWArgs:` + mapStringForKWArgs + `,`, @@ -11426,221 +11412,2124 @@ func (this *Transformer) String() string { }, "") return s } -func (this *UDF) String() string { +func (this *GSSAPI) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&UDF{`, - `Container:` + strings.Replace(this.Container.String(), "Container", "Container", 1) + `,`, - `Builtin:` + strings.Replace(this.Builtin.String(), "Function", "Function", 1) + `,`, - `GroupBy:` + strings.Replace(this.GroupBy.String(), "GroupBy", "GroupBy", 1) + `,`, + s := strings.Join([]string{`&GSSAPI{`, + `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `Realm:` + fmt.Sprintf("%v", this.Realm) + `,`, + `UsernameSecret:` + 
strings.Replace(fmt.Sprintf("%v", this.UsernameSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `AuthType:` + valueToStringGenerated(this.AuthType) + `,`, + `PasswordSecret:` + strings.Replace(fmt.Sprintf("%v", this.PasswordSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `KeytabSecret:` + strings.Replace(fmt.Sprintf("%v", this.KeytabSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `KerberosConfigSecret:` + strings.Replace(fmt.Sprintf("%v", this.KerberosConfigSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, `}`, }, "") return s } -func (this *UDSink) String() string { +func (this *GeneratorSource) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&UDSink{`, - `Container:` + strings.Replace(strings.Replace(this.Container.String(), "Container", "Container", 1), `&`, ``, 1) + `,`, + s := strings.Join([]string{`&GeneratorSource{`, + `RPU:` + valueToStringGenerated(this.RPU) + `,`, + `Duration:` + strings.Replace(fmt.Sprintf("%v", this.Duration), "Duration", "v11.Duration", 1) + `,`, + `MsgSize:` + valueToStringGenerated(this.MsgSize) + `,`, + `KeyCount:` + valueToStringGenerated(this.KeyCount) + `,`, + `Value:` + valueToStringGenerated(this.Value) + `,`, + `Jitter:` + strings.Replace(fmt.Sprintf("%v", this.Jitter), "Duration", "v11.Duration", 1) + `,`, + `ValueBlob:` + valueToStringGenerated(this.ValueBlob) + `,`, `}`, }, "") return s } -func (this *UDSource) String() string { +func (this *GetDaemonDeploymentReq) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&UDSource{`, - `Container:` + strings.Replace(this.Container.String(), "Container", "Container", 1) + `,`, + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + s := strings.Join([]string{`&GetDaemonDeploymentReq{`, + `ISBSvcType:` + fmt.Sprintf("%v", this.ISBSvcType) + `,`, + `Image:` 
+ fmt.Sprintf("%v", this.Image) + `,`, + `PullPolicy:` + fmt.Sprintf("%v", this.PullPolicy) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `DefaultResources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DefaultResources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *UDTransformer) String() string { +func (this *GetJetStreamServiceSpecReq) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&UDTransformer{`, - `Container:` + strings.Replace(this.Container.String(), "Container", "Container", 1) + `,`, - `Builtin:` + strings.Replace(this.Builtin.String(), "Transformer", "Transformer", 1) + `,`, + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&GetJetStreamServiceSpecReq{`, + `Labels:` + mapStringForLabels + `,`, + `ClusterPort:` + fmt.Sprintf("%v", this.ClusterPort) + `,`, + `ClientPort:` + fmt.Sprintf("%v", this.ClientPort) + `,`, + `MonitorPort:` + fmt.Sprintf("%v", this.MonitorPort) + `,`, + `MetricsPort:` + fmt.Sprintf("%v", this.MetricsPort) + `,`, `}`, }, "") return s } -func (this *Vertex) String() string { +func (this *GetJetStreamStatefulSetSpecReq) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&Vertex{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VertexSpec", "VertexSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VertexStatus", "VertexStatus", 1), `&`, ``, 1) + 
`,`, + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&GetJetStreamStatefulSetSpecReq{`, + `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `Labels:` + mapStringForLabels + `,`, + `NatsImage:` + fmt.Sprintf("%v", this.NatsImage) + `,`, + `MetricsExporterImage:` + fmt.Sprintf("%v", this.MetricsExporterImage) + `,`, + `ConfigReloaderImage:` + fmt.Sprintf("%v", this.ConfigReloaderImage) + `,`, + `ClusterPort:` + fmt.Sprintf("%v", this.ClusterPort) + `,`, + `ClientPort:` + fmt.Sprintf("%v", this.ClientPort) + `,`, + `MonitorPort:` + fmt.Sprintf("%v", this.MonitorPort) + `,`, + `MetricsPort:` + fmt.Sprintf("%v", this.MetricsPort) + `,`, + `ServerAuthSecretName:` + fmt.Sprintf("%v", this.ServerAuthSecretName) + `,`, + `ServerEncryptionSecretName:` + fmt.Sprintf("%v", this.ServerEncryptionSecretName) + `,`, + `ConfigMapName:` + fmt.Sprintf("%v", this.ConfigMapName) + `,`, + `PvcNameIfNeeded:` + fmt.Sprintf("%v", this.PvcNameIfNeeded) + `,`, + `StartCommand:` + fmt.Sprintf("%v", this.StartCommand) + `,`, + `DefaultResources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DefaultResources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *VertexInstance) String() string { +func (this *GetMonoVertexDaemonDeploymentReq) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&VertexInstance{`, - `Vertex:` + strings.Replace(this.Vertex.String(), "Vertex", "Vertex", 1) + `,`, - `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, - `Replica:` + fmt.Sprintf("%v", this.Replica) + `,`, + repeatedStringForEnv := "[]EnvVar{" + 
for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + s := strings.Join([]string{`&GetMonoVertexDaemonDeploymentReq{`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `PullPolicy:` + fmt.Sprintf("%v", this.PullPolicy) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `DefaultResources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DefaultResources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *VertexLimits) String() string { +func (this *GetMonoVertexPodSpecReq) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&VertexLimits{`, - `ReadBatchSize:` + valueToStringGenerated(this.ReadBatchSize) + `,`, - `ReadTimeout:` + strings.Replace(fmt.Sprintf("%v", this.ReadTimeout), "Duration", "v11.Duration", 1) + `,`, - `BufferMaxLength:` + valueToStringGenerated(this.BufferMaxLength) + `,`, - `BufferUsageLimit:` + valueToStringGenerated(this.BufferUsageLimit) + `,`, + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + s := strings.Join([]string{`&GetMonoVertexPodSpecReq{`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `PullPolicy:` + fmt.Sprintf("%v", this.PullPolicy) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `DefaultResources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DefaultResources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *VertexList) String() string { +func (this *GetRedisServiceSpecReq) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]Vertex{" + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := 
"map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&GetRedisServiceSpecReq{`, + `Labels:` + mapStringForLabels + `,`, + `RedisContainerPort:` + fmt.Sprintf("%v", this.RedisContainerPort) + `,`, + `SentinelContainerPort:` + fmt.Sprintf("%v", this.SentinelContainerPort) + `,`, + `}`, + }, "") + return s +} +func (this *GetRedisStatefulSetSpecReq) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&GetRedisStatefulSetSpecReq{`, + `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `Labels:` + mapStringForLabels + `,`, + `RedisImage:` + fmt.Sprintf("%v", this.RedisImage) + `,`, + `SentinelImage:` + fmt.Sprintf("%v", this.SentinelImage) + `,`, + `MetricsExporterImage:` + fmt.Sprintf("%v", this.MetricsExporterImage) + `,`, + `InitContainerImage:` + fmt.Sprintf("%v", this.InitContainerImage) + `,`, + `RedisContainerPort:` + fmt.Sprintf("%v", this.RedisContainerPort) + `,`, + `SentinelContainerPort:` + fmt.Sprintf("%v", this.SentinelContainerPort) + `,`, + `RedisMetricsContainerPort:` + fmt.Sprintf("%v", this.RedisMetricsContainerPort) + `,`, + `CredentialSecretName:` + fmt.Sprintf("%v", this.CredentialSecretName) + `,`, + `TLSEnabled:` + fmt.Sprintf("%v", this.TLSEnabled) + `,`, + `PvcNameIfNeeded:` + fmt.Sprintf("%v", this.PvcNameIfNeeded) + `,`, + `ConfConfigMapName:` + fmt.Sprintf("%v", this.ConfConfigMapName) + `,`, + `ScriptsConfigMapName:` + fmt.Sprintf("%v", this.ScriptsConfigMapName) + `,`, + `HealthConfigMapName:` + 
fmt.Sprintf("%v", this.HealthConfigMapName) + `,`, + `DefaultResources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DefaultResources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *GetSideInputDeploymentReq) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + s := strings.Join([]string{`&GetSideInputDeploymentReq{`, + `ISBSvcType:` + fmt.Sprintf("%v", this.ISBSvcType) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `PullPolicy:` + fmt.Sprintf("%v", this.PullPolicy) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `DefaultResources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DefaultResources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *GetVertexPodSpecReq) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + s := strings.Join([]string{`&GetVertexPodSpecReq{`, + `ISBSvcType:` + fmt.Sprintf("%v", this.ISBSvcType) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `PullPolicy:` + fmt.Sprintf("%v", this.PullPolicy) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `SideInputsStoreName:` + fmt.Sprintf("%v", this.SideInputsStoreName) + `,`, + `ServingSourceStreamName:` + fmt.Sprintf("%v", this.ServingSourceStreamName) + `,`, + `PipelineSpec:` + strings.Replace(strings.Replace(this.PipelineSpec.String(), "PipelineSpec", "PipelineSpec", 1), `&`, ``, 1) + `,`, + `DefaultResources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DefaultResources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this 
*GroupBy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GroupBy{`, + `Window:` + strings.Replace(strings.Replace(this.Window.String(), "Window", "Window", 1), `&`, ``, 1) + `,`, + `Keyed:` + fmt.Sprintf("%v", this.Keyed) + `,`, + `AllowedLateness:` + strings.Replace(fmt.Sprintf("%v", this.AllowedLateness), "Duration", "v11.Duration", 1) + `,`, + `Storage:` + strings.Replace(this.Storage.String(), "PBQStorage", "PBQStorage", 1) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPSource{`, + `Auth:` + strings.Replace(this.Auth.String(), "Authorization", "Authorization", 1) + `,`, + `Service:` + fmt.Sprintf("%v", this.Service) + `,`, + `}`, + }, "") + return s +} +func (this *IdleSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IdleSource{`, + `Threshold:` + strings.Replace(fmt.Sprintf("%v", this.Threshold), "Duration", "v11.Duration", 1) + `,`, + `StepInterval:` + strings.Replace(fmt.Sprintf("%v", this.StepInterval), "Duration", "v11.Duration", 1) + `,`, + `IncrementBy:` + strings.Replace(fmt.Sprintf("%v", this.IncrementBy), "Duration", "v11.Duration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *InterStepBufferService) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&InterStepBufferService{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "InterStepBufferServiceSpec", "InterStepBufferServiceSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "InterStepBufferServiceStatus", "InterStepBufferServiceStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *InterStepBufferServiceList) String() string { + if this == nil { + 
return "nil" + } + repeatedStringForItems := "[]InterStepBufferService{" for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Vertex", "Vertex", 1), `&`, ``, 1) + "," + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "InterStepBufferService", "InterStepBufferService", 1), `&`, ``, 1) + "," } repeatedStringForItems += "}" - s := strings.Join([]string{`&VertexList{`, + s := strings.Join([]string{`&InterStepBufferServiceList{`, `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s } -func (this *VertexSpec) String() string { +func (this *InterStepBufferServiceSpec) String() string { if this == nil { return "nil" } - repeatedStringForFromEdges := "[]CombinedEdge{" - for _, f := range this.FromEdges { - repeatedStringForFromEdges += strings.Replace(strings.Replace(f.String(), "CombinedEdge", "CombinedEdge", 1), `&`, ``, 1) + "," - } - repeatedStringForFromEdges += "}" - repeatedStringForToEdges := "[]CombinedEdge{" - for _, f := range this.ToEdges { - repeatedStringForToEdges += strings.Replace(strings.Replace(f.String(), "CombinedEdge", "CombinedEdge", 1), `&`, ``, 1) + "," - } - repeatedStringForToEdges += "}" - s := strings.Join([]string{`&VertexSpec{`, - `AbstractVertex:` + strings.Replace(strings.Replace(this.AbstractVertex.String(), "AbstractVertex", "AbstractVertex", 1), `&`, ``, 1) + `,`, - `PipelineName:` + fmt.Sprintf("%v", this.PipelineName) + `,`, - `InterStepBufferServiceName:` + fmt.Sprintf("%v", this.InterStepBufferServiceName) + `,`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `FromEdges:` + repeatedStringForFromEdges + `,`, - `ToEdges:` + repeatedStringForToEdges + `,`, - `Watermark:` + strings.Replace(strings.Replace(this.Watermark.String(), "Watermark", "Watermark", 1), `&`, ``, 1) + `,`, + s := 
strings.Join([]string{`&InterStepBufferServiceSpec{`, + `Redis:` + strings.Replace(this.Redis.String(), "RedisBufferService", "RedisBufferService", 1) + `,`, + `JetStream:` + strings.Replace(this.JetStream.String(), "JetStreamBufferService", "JetStreamBufferService", 1) + `,`, `}`, }, "") return s } -func (this *VertexStatus) String() string { +func (this *InterStepBufferServiceStatus) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&VertexStatus{`, + s := strings.Join([]string{`&InterStepBufferServiceStatus{`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "Status", "Status", 1), `&`, ``, 1) + `,`, `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `Selector:` + fmt.Sprintf("%v", this.Selector) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastScaledAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastScaledAt), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, + `Config:` + strings.Replace(strings.Replace(this.Config.String(), "BufferServiceConfig", "BufferServiceConfig", 1), `&`, ``, 1) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `}`, }, "") return s } -func (this *VertexTemplate) String() string { +func (this *JetStreamBufferService) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&VertexTemplate{`, - `AbstractPodTemplate:` + strings.Replace(strings.Replace(this.AbstractPodTemplate.String(), "AbstractPodTemplate", "AbstractPodTemplate", 1), `&`, ``, 1) + `,`, + s := strings.Join([]string{`&JetStreamBufferService{`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, `ContainerTemplate:` + strings.Replace(this.ContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, - 
`InitContainerTemplate:` + strings.Replace(this.InitContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, + `ReloaderContainerTemplate:` + strings.Replace(this.ReloaderContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, + `MetricsContainerTemplate:` + strings.Replace(this.MetricsContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, + `Persistence:` + strings.Replace(this.Persistence.String(), "PersistenceStrategy", "PersistenceStrategy", 1) + `,`, + `AbstractPodTemplate:` + strings.Replace(strings.Replace(this.AbstractPodTemplate.String(), "AbstractPodTemplate", "AbstractPodTemplate", 1), `&`, ``, 1) + `,`, + `Settings:` + valueToStringGenerated(this.Settings) + `,`, + `StartArgs:` + fmt.Sprintf("%v", this.StartArgs) + `,`, + `BufferConfig:` + valueToStringGenerated(this.BufferConfig) + `,`, + `Encryption:` + fmt.Sprintf("%v", this.Encryption) + `,`, + `TLS:` + fmt.Sprintf("%v", this.TLS) + `,`, `}`, }, "") return s } -func (this *Watermark) String() string { +func (this *JetStreamConfig) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&Watermark{`, - `Disabled:` + fmt.Sprintf("%v", this.Disabled) + `,`, - `MaxDelay:` + strings.Replace(fmt.Sprintf("%v", this.MaxDelay), "Duration", "v11.Duration", 1) + `,`, - `IdleSource:` + strings.Replace(this.IdleSource.String(), "IdleSource", "IdleSource", 1) + `,`, + s := strings.Join([]string{`&JetStreamConfig{`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `Auth:` + strings.Replace(this.Auth.String(), "NatsAuth", "NatsAuth", 1) + `,`, + `StreamConfig:` + fmt.Sprintf("%v", this.StreamConfig) + `,`, + `TLSEnabled:` + fmt.Sprintf("%v", this.TLSEnabled) + `,`, `}`, }, "") return s } -func (this *Window) String() string { +func (this *JetStreamSource) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&Window{`, - `Fixed:` + strings.Replace(this.Fixed.String(), "FixedWindow", 
"FixedWindow", 1) + `,`, - `Sliding:` + strings.Replace(this.Sliding.String(), "SlidingWindow", "SlidingWindow", 1) + `,`, - `Session:` + strings.Replace(this.Session.String(), "SessionWindow", "SessionWindow", 1) + `,`, + s := strings.Join([]string{`&JetStreamSource{`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `Stream:` + fmt.Sprintf("%v", this.Stream) + `,`, + `TLS:` + strings.Replace(this.TLS.String(), "TLS", "TLS", 1) + `,`, + `Auth:` + strings.Replace(this.Auth.String(), "NatsAuth", "NatsAuth", 1) + `,`, `}`, }, "") return s } -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { +func (this *JobTemplate) String() string { + if this == nil { return "nil" } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + s := strings.Join([]string{`&JobTemplate{`, + `AbstractPodTemplate:` + strings.Replace(strings.Replace(this.AbstractPodTemplate.String(), "AbstractPodTemplate", "AbstractPodTemplate", 1), `&`, ``, 1) + `,`, + `ContainerTemplate:` + strings.Replace(this.ContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, + `TTLSecondsAfterFinished:` + valueToStringGenerated(this.TTLSecondsAfterFinished) + `,`, + `BackoffLimit:` + valueToStringGenerated(this.BackoffLimit) + `,`, + `}`, + }, "") + return s } -func (m *AbstractPodTemplate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated +func (this *KafkaSink) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KafkaSink{`, + `Brokers:` + fmt.Sprintf("%v", this.Brokers) + `,`, + `Topic:` + fmt.Sprintf("%v", this.Topic) + `,`, + `TLS:` + strings.Replace(this.TLS.String(), "TLS", "TLS", 1) + `,`, + `Config:` + fmt.Sprintf("%v", this.Config) + `,`, + `SASL:` + strings.Replace(this.SASL.String(), "SASL", "SASL", 1) + `,`, + `}`, + }, "") + 
return s +} +func (this *KafkaSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KafkaSource{`, + `Brokers:` + fmt.Sprintf("%v", this.Brokers) + `,`, + `Topic:` + fmt.Sprintf("%v", this.Topic) + `,`, + `ConsumerGroupName:` + fmt.Sprintf("%v", this.ConsumerGroupName) + `,`, + `TLS:` + strings.Replace(this.TLS.String(), "TLS", "TLS", 1) + `,`, + `Config:` + fmt.Sprintf("%v", this.Config) + `,`, + `SASL:` + strings.Replace(this.SASL.String(), "SASL", "SASL", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Lifecycle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Lifecycle{`, + `DeleteGracePeriodSeconds:` + valueToStringGenerated(this.DeleteGracePeriodSeconds) + `,`, + `DesiredPhase:` + fmt.Sprintf("%v", this.DesiredPhase) + `,`, + `PauseGracePeriodSeconds:` + valueToStringGenerated(this.PauseGracePeriodSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *Log) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Log{`, + `}`, + }, "") + return s +} +func (this *Metadata) String() string { + if this == nil { + return "nil" + } + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := 
strings.Join([]string{`&Metadata{`, + `Annotations:` + mapStringForAnnotations + `,`, + `Labels:` + mapStringForLabels + `,`, + `}`, + }, "") + return s +} +func (this *MonoVertex) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MonoVertex{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "MonoVertexSpec", "MonoVertexSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "MonoVertexStatus", "MonoVertexStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *MonoVertexLimits) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MonoVertexLimits{`, + `ReadBatchSize:` + valueToStringGenerated(this.ReadBatchSize) + `,`, + `ReadTimeout:` + strings.Replace(fmt.Sprintf("%v", this.ReadTimeout), "Duration", "v11.Duration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MonoVertexList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]MonoVertex{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MonoVertex", "MonoVertex", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&MonoVertexList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *MonoVertexSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForVolumes := "[]Volume{" + for _, f := range this.Volumes { + repeatedStringForVolumes += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumes += "}" + repeatedStringForInitContainers := "[]Container{" + for _, f := range this.InitContainers { + 
repeatedStringForInitContainers += fmt.Sprintf("%v", f) + "," + } + repeatedStringForInitContainers += "}" + repeatedStringForSidecars := "[]Container{" + for _, f := range this.Sidecars { + repeatedStringForSidecars += fmt.Sprintf("%v", f) + "," + } + repeatedStringForSidecars += "}" + s := strings.Join([]string{`&MonoVertexSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Source:` + strings.Replace(this.Source.String(), "Source", "Source", 1) + `,`, + `Sink:` + strings.Replace(this.Sink.String(), "Sink", "Sink", 1) + `,`, + `AbstractPodTemplate:` + strings.Replace(strings.Replace(this.AbstractPodTemplate.String(), "AbstractPodTemplate", "AbstractPodTemplate", 1), `&`, ``, 1) + `,`, + `ContainerTemplate:` + strings.Replace(this.ContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, + `Volumes:` + repeatedStringForVolumes + `,`, + `Limits:` + strings.Replace(this.Limits.String(), "MonoVertexLimits", "MonoVertexLimits", 1) + `,`, + `Scale:` + strings.Replace(strings.Replace(this.Scale.String(), "Scale", "Scale", 1), `&`, ``, 1) + `,`, + `InitContainers:` + repeatedStringForInitContainers + `,`, + `Sidecars:` + repeatedStringForSidecars + `,`, + `DaemonTemplate:` + strings.Replace(this.DaemonTemplate.String(), "DaemonTemplate", "DaemonTemplate", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MonoVertexStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MonoVertexStatus{`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "Status", "Status", 1), `&`, ``, 1) + `,`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `Selector:` + fmt.Sprintf("%v", this.Selector) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdated:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdated), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, 
+ `LastScaledAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastScaledAt), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `}`, + }, "") + return s +} +func (this *NativeRedis) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NativeRedis{`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `RedisContainerTemplate:` + strings.Replace(this.RedisContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, + `SentinelContainerTemplate:` + strings.Replace(this.SentinelContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, + `MetricsContainerTemplate:` + strings.Replace(this.MetricsContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, + `InitContainerTemplate:` + strings.Replace(this.InitContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, + `Persistence:` + strings.Replace(this.Persistence.String(), "PersistenceStrategy", "PersistenceStrategy", 1) + `,`, + `AbstractPodTemplate:` + strings.Replace(strings.Replace(this.AbstractPodTemplate.String(), "AbstractPodTemplate", "AbstractPodTemplate", 1), `&`, ``, 1) + `,`, + `Settings:` + strings.Replace(this.Settings.String(), "RedisSettings", "RedisSettings", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NatsAuth) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NatsAuth{`, + `Basic:` + strings.Replace(this.Basic.String(), "BasicAuth", "BasicAuth", 1) + `,`, + `Token:` + strings.Replace(fmt.Sprintf("%v", this.Token), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `NKey:` + strings.Replace(fmt.Sprintf("%v", this.NKey), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NatsSource) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&NatsSource{`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `Subject:` + fmt.Sprintf("%v", this.Subject) + `,`, + `Queue:` + fmt.Sprintf("%v", this.Queue) + `,`, + `TLS:` + strings.Replace(this.TLS.String(), "TLS", "TLS", 1) + `,`, + `Auth:` + strings.Replace(this.Auth.String(), "NatsAuth", "NatsAuth", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NoStore) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NoStore{`, + `}`, + }, "") + return s +} +func (this *PBQStorage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PBQStorage{`, + `PersistentVolumeClaim:` + strings.Replace(this.PersistentVolumeClaim.String(), "PersistenceStrategy", "PersistenceStrategy", 1) + `,`, + `EmptyDir:` + strings.Replace(fmt.Sprintf("%v", this.EmptyDir), "EmptyDirVolumeSource", "v1.EmptyDirVolumeSource", 1) + `,`, + `NoStore:` + strings.Replace(this.NoStore.String(), "NoStore", "NoStore", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistenceStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistenceStrategy{`, + `StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`, + `AccessMode:` + valueToStringGenerated(this.AccessMode) + `,`, + `VolumeSize:` + strings.Replace(fmt.Sprintf("%v", this.VolumeSize), "Quantity", "resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Pipeline) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Pipeline{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PipelineSpec", "PipelineSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PipelineStatus", "PipelineStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} 
+func (this *PipelineLimits) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PipelineLimits{`, + `ReadBatchSize:` + valueToStringGenerated(this.ReadBatchSize) + `,`, + `BufferMaxLength:` + valueToStringGenerated(this.BufferMaxLength) + `,`, + `BufferUsageLimit:` + valueToStringGenerated(this.BufferUsageLimit) + `,`, + `ReadTimeout:` + strings.Replace(fmt.Sprintf("%v", this.ReadTimeout), "Duration", "v11.Duration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PipelineList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Pipeline{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Pipeline", "Pipeline", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PipelineList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PipelineSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForVertices := "[]AbstractVertex{" + for _, f := range this.Vertices { + repeatedStringForVertices += strings.Replace(strings.Replace(f.String(), "AbstractVertex", "AbstractVertex", 1), `&`, ``, 1) + "," + } + repeatedStringForVertices += "}" + repeatedStringForEdges := "[]Edge{" + for _, f := range this.Edges { + repeatedStringForEdges += strings.Replace(strings.Replace(f.String(), "Edge", "Edge", 1), `&`, ``, 1) + "," + } + repeatedStringForEdges += "}" + repeatedStringForSideInputs := "[]SideInput{" + for _, f := range this.SideInputs { + repeatedStringForSideInputs += strings.Replace(strings.Replace(f.String(), "SideInput", "SideInput", 1), `&`, ``, 1) + "," + } + repeatedStringForSideInputs += "}" + s := strings.Join([]string{`&PipelineSpec{`, + `InterStepBufferServiceName:` + fmt.Sprintf("%v", 
this.InterStepBufferServiceName) + `,`, + `Vertices:` + repeatedStringForVertices + `,`, + `Edges:` + repeatedStringForEdges + `,`, + `Lifecycle:` + strings.Replace(strings.Replace(this.Lifecycle.String(), "Lifecycle", "Lifecycle", 1), `&`, ``, 1) + `,`, + `Limits:` + strings.Replace(this.Limits.String(), "PipelineLimits", "PipelineLimits", 1) + `,`, + `Watermark:` + strings.Replace(strings.Replace(this.Watermark.String(), "Watermark", "Watermark", 1), `&`, ``, 1) + `,`, + `Templates:` + strings.Replace(this.Templates.String(), "Templates", "Templates", 1) + `,`, + `SideInputs:` + repeatedStringForSideInputs + `,`, + `}`, + }, "") + return s +} +func (this *PipelineStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PipelineStatus{`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "Status", "Status", 1), `&`, ``, 1) + `,`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdated:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdated), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, + `VertexCount:` + valueToStringGenerated(this.VertexCount) + `,`, + `SourceCount:` + valueToStringGenerated(this.SourceCount) + `,`, + `SinkCount:` + valueToStringGenerated(this.SinkCount) + `,`, + `UDFCount:` + valueToStringGenerated(this.UDFCount) + `,`, + `MapUDFCount:` + valueToStringGenerated(this.MapUDFCount) + `,`, + `ReduceUDFCount:` + valueToStringGenerated(this.ReduceUDFCount) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `}`, + }, "") + return s +} +func (this *RedisBufferService) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RedisBufferService{`, + `Native:` + strings.Replace(this.Native.String(), "NativeRedis", "NativeRedis", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "RedisConfig", "RedisConfig", 1) + `,`, + `}`, + }, "") + return s +} 
+func (this *RedisConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RedisConfig{`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `SentinelURL:` + fmt.Sprintf("%v", this.SentinelURL) + `,`, + `MasterName:` + fmt.Sprintf("%v", this.MasterName) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Password:` + strings.Replace(fmt.Sprintf("%v", this.Password), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `SentinelPassword:` + strings.Replace(fmt.Sprintf("%v", this.SentinelPassword), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RedisSettings) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RedisSettings{`, + `Redis:` + fmt.Sprintf("%v", this.Redis) + `,`, + `Master:` + fmt.Sprintf("%v", this.Master) + `,`, + `Replica:` + fmt.Sprintf("%v", this.Replica) + `,`, + `Sentinel:` + fmt.Sprintf("%v", this.Sentinel) + `,`, + `}`, + }, "") + return s +} +func (this *SASL) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SASL{`, + `Mechanism:` + valueToStringGenerated(this.Mechanism) + `,`, + `GSSAPI:` + strings.Replace(this.GSSAPI.String(), "GSSAPI", "GSSAPI", 1) + `,`, + `Plain:` + strings.Replace(this.Plain.String(), "SASLPlain", "SASLPlain", 1) + `,`, + `SCRAMSHA256:` + strings.Replace(this.SCRAMSHA256.String(), "SASLPlain", "SASLPlain", 1) + `,`, + `SCRAMSHA512:` + strings.Replace(this.SCRAMSHA512.String(), "SASLPlain", "SASLPlain", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SASLPlain) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SASLPlain{`, + `UserSecret:` + strings.Replace(fmt.Sprintf("%v", this.UserSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `PasswordSecret:` + strings.Replace(fmt.Sprintf("%v", this.PasswordSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `Handshake:` + fmt.Sprintf("%v", 
this.Handshake) + `,`, + `}`, + }, "") + return s +} +func (this *Scale) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Scale{`, + `Disabled:` + fmt.Sprintf("%v", this.Disabled) + `,`, + `Min:` + valueToStringGenerated(this.Min) + `,`, + `Max:` + valueToStringGenerated(this.Max) + `,`, + `LookbackSeconds:` + valueToStringGenerated(this.LookbackSeconds) + `,`, + `DeprecatedCooldownSeconds:` + valueToStringGenerated(this.DeprecatedCooldownSeconds) + `,`, + `ZeroReplicaSleepSeconds:` + valueToStringGenerated(this.ZeroReplicaSleepSeconds) + `,`, + `TargetProcessingSeconds:` + valueToStringGenerated(this.TargetProcessingSeconds) + `,`, + `TargetBufferAvailability:` + valueToStringGenerated(this.TargetBufferAvailability) + `,`, + `ReplicasPerScale:` + valueToStringGenerated(this.ReplicasPerScale) + `,`, + `ScaleUpCooldownSeconds:` + valueToStringGenerated(this.ScaleUpCooldownSeconds) + `,`, + `ScaleDownCooldownSeconds:` + valueToStringGenerated(this.ScaleDownCooldownSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ServingSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServingSource{`, + `Auth:` + strings.Replace(this.Auth.String(), "Authorization", "Authorization", 1) + `,`, + `Service:` + fmt.Sprintf("%v", this.Service) + `,`, + `MsgIDHeaderKey:` + valueToStringGenerated(this.MsgIDHeaderKey) + `,`, + `Store:` + strings.Replace(this.Store.String(), "ServingStore", "ServingStore", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServingStore) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServingStore{`, + `URL:` + valueToStringGenerated(this.URL) + `,`, + `TTL:` + strings.Replace(fmt.Sprintf("%v", this.TTL), "Duration", "v11.Duration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SessionWindow) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SessionWindow{`, + `Timeout:` + 
strings.Replace(fmt.Sprintf("%v", this.Timeout), "Duration", "v11.Duration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SideInput) String() string { + if this == nil { + return "nil" + } + repeatedStringForVolumes := "[]Volume{" + for _, f := range this.Volumes { + repeatedStringForVolumes += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumes += "}" + s := strings.Join([]string{`&SideInput{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Container:` + strings.Replace(this.Container.String(), "Container", "Container", 1) + `,`, + `Volumes:` + repeatedStringForVolumes + `,`, + `Trigger:` + strings.Replace(this.Trigger.String(), "SideInputTrigger", "SideInputTrigger", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SideInputTrigger) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SideInputTrigger{`, + `Schedule:` + fmt.Sprintf("%v", this.Schedule) + `,`, + `Timezone:` + valueToStringGenerated(this.Timezone) + `,`, + `}`, + }, "") + return s +} +func (this *SideInputsManagerTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SideInputsManagerTemplate{`, + `AbstractPodTemplate:` + strings.Replace(strings.Replace(this.AbstractPodTemplate.String(), "AbstractPodTemplate", "AbstractPodTemplate", 1), `&`, ``, 1) + `,`, + `ContainerTemplate:` + strings.Replace(this.ContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, + `InitContainerTemplate:` + strings.Replace(this.InitContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Sink) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Sink{`, + `AbstractSink:` + strings.Replace(strings.Replace(this.AbstractSink.String(), "AbstractSink", "AbstractSink", 1), `&`, ``, 1) + `,`, + `Fallback:` + strings.Replace(this.Fallback.String(), "AbstractSink", "AbstractSink", 1) + `,`, + `}`, + }, "") + 
return s +} +func (this *SlidingWindow) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SlidingWindow{`, + `Length:` + strings.Replace(fmt.Sprintf("%v", this.Length), "Duration", "v11.Duration", 1) + `,`, + `Slide:` + strings.Replace(fmt.Sprintf("%v", this.Slide), "Duration", "v11.Duration", 1) + `,`, + `Streaming:` + fmt.Sprintf("%v", this.Streaming) + `,`, + `}`, + }, "") + return s +} +func (this *Source) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Source{`, + `Generator:` + strings.Replace(this.Generator.String(), "GeneratorSource", "GeneratorSource", 1) + `,`, + `Kafka:` + strings.Replace(this.Kafka.String(), "KafkaSource", "KafkaSource", 1) + `,`, + `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPSource", "HTTPSource", 1) + `,`, + `Nats:` + strings.Replace(this.Nats.String(), "NatsSource", "NatsSource", 1) + `,`, + `UDTransformer:` + strings.Replace(this.UDTransformer.String(), "UDTransformer", "UDTransformer", 1) + `,`, + `UDSource:` + strings.Replace(this.UDSource.String(), "UDSource", "UDSource", 1) + `,`, + `JetStream:` + strings.Replace(this.JetStream.String(), "JetStreamSource", "JetStreamSource", 1) + `,`, + `Serving:` + strings.Replace(this.Serving.String(), "ServingSource", "ServingSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Status) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&Status{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *TLS) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TLS{`, + `InsecureSkipVerify:` + fmt.Sprintf("%v", this.InsecureSkipVerify) + `,`, + `CACertSecret:` + strings.Replace(fmt.Sprintf("%v", this.CACertSecret), 
"SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `CertSecret:` + strings.Replace(fmt.Sprintf("%v", this.CertSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `KeySecret:` + strings.Replace(fmt.Sprintf("%v", this.KeySecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TagConditions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TagConditions{`, + `Operator:` + valueToStringGenerated(this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *Templates) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Templates{`, + `DaemonTemplate:` + strings.Replace(this.DaemonTemplate.String(), "DaemonTemplate", "DaemonTemplate", 1) + `,`, + `JobTemplate:` + strings.Replace(this.JobTemplate.String(), "JobTemplate", "JobTemplate", 1) + `,`, + `SideInputsManagerTemplate:` + strings.Replace(this.SideInputsManagerTemplate.String(), "SideInputsManagerTemplate", "SideInputsManagerTemplate", 1) + `,`, + `VertexTemplate:` + strings.Replace(this.VertexTemplate.String(), "VertexTemplate", "VertexTemplate", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Transformer) String() string { + if this == nil { + return "nil" + } + keysForKWArgs := make([]string, 0, len(this.KWArgs)) + for k := range this.KWArgs { + keysForKWArgs = append(keysForKWArgs, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForKWArgs) + mapStringForKWArgs := "map[string]string{" + for _, k := range keysForKWArgs { + mapStringForKWArgs += fmt.Sprintf("%v: %v,", k, this.KWArgs[k]) + } + mapStringForKWArgs += "}" + s := strings.Join([]string{`&Transformer{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, + `KWArgs:` + mapStringForKWArgs + `,`, + `}`, + }, "") + return s +} +func (this *UDF) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&UDF{`, + `Container:` + strings.Replace(this.Container.String(), "Container", "Container", 1) + `,`, + `Builtin:` + strings.Replace(this.Builtin.String(), "Function", "Function", 1) + `,`, + `GroupBy:` + strings.Replace(this.GroupBy.String(), "GroupBy", "GroupBy", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UDSink) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UDSink{`, + `Container:` + strings.Replace(strings.Replace(this.Container.String(), "Container", "Container", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *UDSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UDSource{`, + `Container:` + strings.Replace(this.Container.String(), "Container", "Container", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UDTransformer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UDTransformer{`, + `Container:` + strings.Replace(this.Container.String(), "Container", "Container", 1) + `,`, + `Builtin:` + strings.Replace(this.Builtin.String(), "Transformer", "Transformer", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Vertex) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Vertex{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VertexSpec", "VertexSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VertexStatus", "VertexStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VertexInstance) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VertexInstance{`, + `Vertex:` + strings.Replace(this.Vertex.String(), "Vertex", "Vertex", 1) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + 
`Replica:` + fmt.Sprintf("%v", this.Replica) + `,`, + `}`, + }, "") + return s +} +func (this *VertexLimits) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VertexLimits{`, + `ReadBatchSize:` + valueToStringGenerated(this.ReadBatchSize) + `,`, + `ReadTimeout:` + strings.Replace(fmt.Sprintf("%v", this.ReadTimeout), "Duration", "v11.Duration", 1) + `,`, + `BufferMaxLength:` + valueToStringGenerated(this.BufferMaxLength) + `,`, + `BufferUsageLimit:` + valueToStringGenerated(this.BufferUsageLimit) + `,`, + `}`, + }, "") + return s +} +func (this *VertexList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Vertex{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Vertex", "Vertex", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&VertexList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *VertexSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForFromEdges := "[]CombinedEdge{" + for _, f := range this.FromEdges { + repeatedStringForFromEdges += strings.Replace(strings.Replace(f.String(), "CombinedEdge", "CombinedEdge", 1), `&`, ``, 1) + "," + } + repeatedStringForFromEdges += "}" + repeatedStringForToEdges := "[]CombinedEdge{" + for _, f := range this.ToEdges { + repeatedStringForToEdges += strings.Replace(strings.Replace(f.String(), "CombinedEdge", "CombinedEdge", 1), `&`, ``, 1) + "," + } + repeatedStringForToEdges += "}" + s := strings.Join([]string{`&VertexSpec{`, + `AbstractVertex:` + strings.Replace(strings.Replace(this.AbstractVertex.String(), "AbstractVertex", "AbstractVertex", 1), `&`, ``, 1) + `,`, + `PipelineName:` + fmt.Sprintf("%v", this.PipelineName) + `,`, + 
`InterStepBufferServiceName:` + fmt.Sprintf("%v", this.InterStepBufferServiceName) + `,`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `FromEdges:` + repeatedStringForFromEdges + `,`, + `ToEdges:` + repeatedStringForToEdges + `,`, + `Watermark:` + strings.Replace(strings.Replace(this.Watermark.String(), "Watermark", "Watermark", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VertexStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VertexStatus{`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "Status", "Status", 1), `&`, ``, 1) + `,`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `Selector:` + fmt.Sprintf("%v", this.Selector) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastScaledAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastScaledAt), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `}`, + }, "") + return s +} +func (this *VertexTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VertexTemplate{`, + `AbstractPodTemplate:` + strings.Replace(strings.Replace(this.AbstractPodTemplate.String(), "AbstractPodTemplate", "AbstractPodTemplate", 1), `&`, ``, 1) + `,`, + `ContainerTemplate:` + strings.Replace(this.ContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, + `InitContainerTemplate:` + strings.Replace(this.InitContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Watermark) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Watermark{`, + `Disabled:` + fmt.Sprintf("%v", this.Disabled) + `,`, + `MaxDelay:` + strings.Replace(fmt.Sprintf("%v", this.MaxDelay), "Duration", 
"v11.Duration", 1) + `,`, + `IdleSource:` + strings.Replace(this.IdleSource.String(), "IdleSource", "IdleSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Window) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Window{`, + `Fixed:` + strings.Replace(this.Fixed.String(), "FixedWindow", "FixedWindow", 1) + `,`, + `Sliding:` + strings.Replace(this.Sliding.String(), "SlidingWindow", "SlidingWindow", 1) + `,`, + `Session:` + strings.Replace(this.Session.String(), "SessionWindow", "SessionWindow", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AbstractPodTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AbstractPodTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AbstractPodTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } 
+ if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &Metadata{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.NodeSelector[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, v1.Toleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecurityContext == nil { + m.SecurityContext = &v1.PodSecurityContext{} + } + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImagePullSecrets = append(m.ImagePullSecrets, v1.LocalObjectReference{}) + if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PriorityClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.PriorityClassName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Priority = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Affinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Affinity == nil { + m.Affinity = &v1.Affinity{} + } + if err := m.Affinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceAccountName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field RuntimeClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.RuntimeClassName = &s + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutomountServiceAccountToken", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AutomountServiceAccountToken = &b + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DNSPolicy = k8s_io_api_core_v1.DNSPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DNSConfig == nil { + m.DNSConfig = &v1.PodDNSConfig{} + } + if err := m.DNSConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AbstractSink) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AbstractSink: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AbstractSink: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Log == nil { + m.Log = &Log{} + } + if err := m.Log.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kafka", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Kafka == nil { + m.Kafka = &KafkaSink{} + } + if err := m.Kafka.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blackhole", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Blackhole == nil { + m.Blackhole = &Blackhole{} + } + if err := m.Blackhole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UDSink", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UDSink == nil { + m.UDSink = &UDSink{} + } + if err := m.UDSink.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AbstractVertex) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AbstractVertex: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AbstractVertex: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Source == nil { + m.Source = &Source{} + } + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sink", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Sink == nil { + m.Sink = &Sink{} + } + if err := m.Sink.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UDF", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen 
|= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if m.UDF == nil { + m.UDF = &UDF{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AbstractPodTemplate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AbstractPodTemplate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + if err := m.UDF.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContainerTemplate", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11667,16 +13556,16 @@ func (m *AbstractPodTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metadata == nil { - m.Metadata = &Metadata{} + if m.ContainerTemplate == nil { + m.ContainerTemplate = &ContainerTemplate{} } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ContainerTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field InitContainerTemplate", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11703,107 +13592,16 @@ func (m *AbstractPodTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NodeSelector == nil { - m.NodeSelector = make(map[string]string) + 
if m.InitContainerTemplate == nil { + m.InitContainerTemplate = &ContainerTemplate{} } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - 
} - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.InitContainerTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.NodeSelector[mapkey] = mapvalue iNdEx = postIndex - case 3: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AbstractPodTemplate", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11830,14 +13628,13 @@ func (m *AbstractPodTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tolerations = append(m.Tolerations, v1.Toleration{}) - if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.AbstractPodTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11864,16 +13661,14 @@ func (m *AbstractPodTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecurityContext == nil { - m.SecurityContext = &v1.PodSecurityContext{} - } - if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Volumes = append(m.Volumes, v1.Volume{}) + if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11900,16 +13695,18 @@ func (m 
*AbstractPodTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ImagePullSecrets = append(m.ImagePullSecrets, v1.LocalObjectReference{}) - if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Limits == nil { + m.Limits = &VertexLimits{} + } + if err := m.Limits.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PriorityClassName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Scale", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11919,29 +13716,30 @@ func (m *AbstractPodTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.PriorityClassName = string(dAtA[iNdEx:postIndex]) + if err := m.Scale.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitContainers", wireType) } - var v int32 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11951,15 +13749,29 @@ func (m *AbstractPodTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Priority = &v - case 8: + if msglen 
< 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InitContainers = append(m.InitContainers, v1.Container{}) + if err := m.InitContainers[len(m.InitContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Affinity", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Sidecars", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11986,16 +13798,34 @@ func (m *AbstractPodTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Affinity == nil { - m.Affinity = &v1.Affinity{} - } - if err := m.Affinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Sidecars = append(m.Sidecars, v1.Container{}) + if err := m.Sidecars[len(m.Sidecars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 9: + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partitions", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Partitions = &v + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SideInputs", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -12023,13 +13853,13 @@ func (m *AbstractPodTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ServiceAccountName = string(dAtA[iNdEx:postIndex]) + m.SideInputs = append(m.SideInputs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex 
- case 10: + case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClassName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SideInputsContainerTemplate", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12039,81 +13869,81 @@ func (m *AbstractPodTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.RuntimeClassName = &s - iNdEx = postIndex - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AutomountServiceAccountToken", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if m.SideInputsContainerTemplate == nil { + m.SideInputsContainerTemplate = &ContainerTemplate{} } - b := bool(v != 0) - m.AutomountServiceAccountToken = &b - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DNSPolicy", wireType) + if err := m.SideInputsContainerTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + iNdEx = postIndex + default: + iNdEx = preIndex + 
skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Authorization) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.DNSPolicy = k8s_io_api_core_v1.DNSPolicy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 13: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Authorization: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Authorization: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DNSConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12140,10 +13970,10 @@ func (m *AbstractPodTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.DNSConfig == nil { - m.DNSConfig = &v1.PodDNSConfig{} + if m.Token == nil { + m.Token = &v1.SecretKeySelector{} } - if err := m.DNSConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Token.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12168,7 +13998,7 @@ func (m *AbstractPodTemplate) Unmarshal(dAtA []byte) 
error { } return nil } -func (m *AbstractSink) Unmarshal(dAtA []byte) error { +func (m *BasicAuth) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12191,15 +14021,15 @@ func (m *AbstractSink) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AbstractSink: wiretype end group for non-group") + return fmt.Errorf("proto: BasicAuth: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AbstractSink: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: BasicAuth: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12226,16 +14056,16 @@ func (m *AbstractSink) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Log == nil { - m.Log = &Log{} + if m.User == nil { + m.User = &v1.SecretKeySelector{} } - if err := m.Log.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kafka", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12262,16 +14092,116 @@ func (m *AbstractSink) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Kafka == nil { - m.Kafka = &KafkaSink{} + if m.Password == nil { + m.Password = &v1.SecretKeySelector{} } - if err := m.Kafka.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Password.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + default: + iNdEx = preIndex + 
skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Blackhole) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Blackhole: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Blackhole: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BufferServiceConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BufferServiceConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BufferServiceConfig: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Blackhole", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Redis", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12298,16 +14228,16 @@ func (m *AbstractSink) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Blackhole == nil { - m.Blackhole = &Blackhole{} + if m.Redis == nil { + m.Redis = &RedisConfig{} } - if err := m.Blackhole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Redis.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UDSink", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field JetStream", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12334,10 +14264,10 @@ func (m *AbstractSink) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.UDSink == nil { - m.UDSink = &UDSink{} + if m.JetStream == nil { + m.JetStream = &JetStreamConfig{} } - if err := m.UDSink.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.JetStream.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12362,7 +14292,7 @@ func (m *AbstractSink) Unmarshal(dAtA []byte) error { } return nil } -func (m *AbstractVertex) Unmarshal(dAtA []byte) error { +func (m *CombinedEdge) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12385,17 +14315,17 @@ func (m *AbstractVertex) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AbstractVertex: wiretype end group for non-group") + return fmt.Errorf("proto: CombinedEdge: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AbstractVertex: illegal tag %d (wire type %d)", 
fieldNum, wire) + return fmt.Errorf("proto: CombinedEdge: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Edge", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12405,29 +14335,30 @@ func (m *AbstractVertex) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if err := m.Edge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FromVertexType", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12437,31 +14368,47 @@ func (m *AbstractVertex) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Source == nil { - m.Source = &Source{} - } - if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + 
m.FromVertexType = VertexType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FromVertexPartitionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.FromVertexPartitionCount = &v + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sink", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FromVertexLimits", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12488,18 +14435,18 @@ func (m *AbstractVertex) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Sink == nil { - m.Sink = &Sink{} + if m.FromVertexLimits == nil { + m.FromVertexLimits = &VertexLimits{} } - if err := m.Sink.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.FromVertexLimits.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UDF", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ToVertexType", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12509,31 +14456,47 @@ func (m *AbstractVertex) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.UDF == nil { - m.UDF = &UDF{} + 
m.ToVertexType = VertexType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ToVertexPartitionCount", wireType) } - if err := m.UDF.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex - case 5: + m.ToVertexPartitionCount = &v + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerTemplate", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ToVertexLimits", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12560,18 +14523,68 @@ func (m *AbstractVertex) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ContainerTemplate == nil { - m.ContainerTemplate = &ContainerTemplate{} + if m.ToVertexLimits == nil { + m.ToVertexLimits = &VertexLimits{} } - if err := m.ContainerTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ToVertexLimits.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Container) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Container: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Container: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitContainerTemplate", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12581,33 +14594,29 @@ func (m *AbstractVertex) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.InitContainerTemplate == nil { - m.InitContainerTemplate = &ContainerTemplate{} - } - if err := m.InitContainerTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Image = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 7: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AbstractPodTemplate", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12617,30 +14626,29 @@ func (m *AbstractVertex) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + 
if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.AbstractPodTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 8: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12650,29 +14658,27 @@ func (m *AbstractVertex) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Volumes = append(m.Volumes, v1.Volume{}) - if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 9: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12699,16 +14705,14 @@ func (m *AbstractVertex) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Limits == nil { - m.Limits = &VertexLimits{} - } - if err := m.Limits.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Env = append(m.Env, v1.EnvVar{}) + if err := 
m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 10: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scale", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12735,13 +14739,14 @@ func (m *AbstractVertex) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Scale.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.EnvFrom = append(m.EnvFrom, v1.EnvFromSource{}) + if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 11: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitContainers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12768,14 +14773,14 @@ func (m *AbstractVertex) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.InitContainers = append(m.InitContainers, v1.Container{}) - if err := m.InitContainers[len(m.InitContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.VolumeMounts = append(m.VolumeMounts, v1.VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 12: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sidecars", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12802,36 +14807,15 @@ func (m *AbstractVertex) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Sidecars = append(m.Sidecars, v1.Container{}) - if err := m.Sidecars[len(m.Sidecars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := 
m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Partitions", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Partitions = &v - case 14: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SideInputs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12841,29 +14825,33 @@ func (m *AbstractVertex) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.SideInputs = append(m.SideInputs, string(dAtA[iNdEx:postIndex])) + if m.SecurityContext == nil { + m.SecurityContext = &v1.SecurityContext{} + } + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 15: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SideInputsContainerTemplate", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12873,27 +14861,24 @@ func (m *AbstractVertex) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.SideInputsContainerTemplate == nil { - m.SideInputsContainerTemplate = &ContainerTemplate{} - } - if err := m.SideInputsContainerTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := k8s_io_api_core_v1.PullPolicy(dAtA[iNdEx:postIndex]) + m.ImagePullPolicy = &s iNdEx = postIndex default: iNdEx = preIndex @@ -12916,7 +14901,7 @@ func (m *AbstractVertex) Unmarshal(dAtA []byte) error { } return nil } -func (m *Authorization) Unmarshal(dAtA []byte) error { +func (m *ContainerTemplate) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12939,15 +14924,15 @@ func (m *Authorization) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Authorization: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerTemplate: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Authorization: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerTemplate: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12974,66 +14959,81 @@ func (m *Authorization) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Token == nil { - m.Token = &v1.SecretKeySelector{} - } - if err := 
m.Token.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BasicAuth) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + m.ImagePullPolicy = k8s_io_api_core_v1.PullPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) } - if iNdEx >= l { + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - 
wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if m.SecurityContext == nil { + m.SecurityContext = &v1.SecurityContext{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BasicAuth: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BasicAuth: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13060,16 +15060,14 @@ func (m *BasicAuth) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.User == nil { - m.User = &v1.SecretKeySelector{} - } - if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Env = append(m.Env, v1.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13096,10 +15094,8 @@ func (m *BasicAuth) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Password == nil { - m.Password = &v1.SecretKeySelector{} - } - if err := m.Password.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.EnvFrom = append(m.EnvFrom, v1.EnvFromSource{}) + if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13124,7 +15120,7 @@ func (m *BasicAuth) Unmarshal(dAtA []byte) error { } return nil } -func (m *Blackhole) Unmarshal(dAtA []byte) 
error { +func (m *DaemonTemplate) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13147,65 +15143,68 @@ func (m *Blackhole) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Blackhole: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonTemplate: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Blackhole: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonTemplate: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AbstractPodTemplate", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BufferServiceConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + if err := m.AbstractPodTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - if iNdEx >= l { - return io.ErrUnexpectedEOF + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) } - b := 
dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BufferServiceConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BufferServiceConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Replicas = &v + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Redis", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContainerTemplate", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13232,16 +15231,16 @@ func (m *BufferServiceConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Redis == nil { - m.Redis = &RedisConfig{} + if m.ContainerTemplate == nil { + m.ContainerTemplate = &ContainerTemplate{} } - if err := m.Redis.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ContainerTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field JetStream", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field InitContainerTemplate", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13268,10 +15267,10 @@ func (m *BufferServiceConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.JetStream == nil { - m.JetStream = &JetStreamConfig{} + if m.InitContainerTemplate == nil { + m.InitContainerTemplate = &ContainerTemplate{} } - if err := m.JetStream.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := 
m.InitContainerTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13296,7 +15295,7 @@ func (m *BufferServiceConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *CombinedEdge) Unmarshal(dAtA []byte) error { +func (m *Edge) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13319,17 +15318,17 @@ func (m *CombinedEdge) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CombinedEdge: wiretype end group for non-group") + return fmt.Errorf("proto: Edge: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CombinedEdge: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Edge: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Edge", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13339,28 +15338,27 @@ func (m *CombinedEdge) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Edge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.From = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FromVertexType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) } var stringLen uint64 for 
shift := uint(0); ; shift += 7 { @@ -13388,13 +15386,13 @@ func (m *CombinedEdge) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.FromVertexType = VertexType(dAtA[iNdEx:postIndex]) + m.To = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FromVertexPartitionCount", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } - var v int32 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13404,17 +15402,33 @@ func (m *CombinedEdge) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.FromVertexPartitionCount = &v + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Conditions == nil { + m.Conditions = &ForwardConditions{} + } + if err := m.Conditions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FromVertexLimits", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OnFull", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13424,33 +15438,80 @@ func (m *CombinedEdge) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { 
return io.ErrUnexpectedEOF } - if m.FromVertexLimits == nil { - m.FromVertexLimits = &VertexLimits{} - } - if err := m.FromVertexLimits.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + s := BufferFullWritingStrategy(dAtA[iNdEx:postIndex]) + m.OnFull = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { return err } - iNdEx = postIndex - case 5: + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FixedWindow) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FixedWindow: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FixedWindow: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ToVertexType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13460,29 +15521,33 @@ func (m *CombinedEdge) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + 
postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ToVertexType = VertexType(dAtA[iNdEx:postIndex]) + if m.Length == nil { + m.Length = &v11.Duration{} + } + if err := m.Length.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 6: + case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ToVertexPartitionCount", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Streaming", wireType) } - var v int32 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13492,15 +15557,65 @@ func (m *CombinedEdge) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - m.ToVertexPartitionCount = &v - case 7: + m.Streaming = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ForwardConditions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ForwardConditions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ForwardConditions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field ToVertexLimits", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13527,10 +15642,10 @@ func (m *CombinedEdge) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ToVertexLimits == nil { - m.ToVertexLimits = &VertexLimits{} + if m.Tags == nil { + m.Tags = &TagConditions{} } - if err := m.ToVertexLimits.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Tags.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13555,7 +15670,7 @@ func (m *CombinedEdge) Unmarshal(dAtA []byte) error { } return nil } -func (m *Container) Unmarshal(dAtA []byte) error { +func (m *Function) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13578,15 +15693,15 @@ func (m *Container) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Container: wiretype end group for non-group") + return fmt.Errorf("proto: Function: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Container: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Function: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13614,41 +15729,9 @@ func (m *Container) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Image = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) - } - var stringLen uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) } @@ -13680,9 +15763,9 @@ func (m *Container) Unmarshal(dAtA []byte) error { } m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KWArgs", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13709,117 +15792,159 @@ func (m *Container) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Env = append(m.Env, v1.EnvVar{}) - if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) + if m.KWArgs == nil { + m.KWArgs = make(map[string]string) } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - m.EnvFrom = append(m.EnvFrom, v1.EnvFromSource{}) - if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.KWArgs[mapkey] = mapvalue iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.VolumeMounts = append(m.VolumeMounts, v1.VolumeMount{}) - if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GSSAPI) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - iNdEx = postIndex - case 8: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GSSAPI: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GSSAPI: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13829,31 +15954,27 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecurityContext == nil { - m.SecurityContext = &v1.SecurityContext{} - } - if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ServiceName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 9: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Realm", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13881,62 +16002,11 @@ func (m *Container) Unmarshal(dAtA []byte) error { if postIndex > l { return 
io.ErrUnexpectedEOF } - s := k8s_io_api_core_v1.PullPolicy(dAtA[iNdEx:postIndex]) - m.ImagePullPolicy = &s + m.Realm = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ContainerTemplate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ContainerTemplate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerTemplate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UsernameSecret", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13963,13 +16033,16 @@ func (m *ContainerTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.UsernameSecret == nil { + m.UsernameSecret = &v1.SecretKeySelector{} + } + if err := m.UsernameSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType) + return 
fmt.Errorf("proto: wrong wireType = %d for field AuthType", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13997,11 +16070,12 @@ func (m *ContainerTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ImagePullPolicy = k8s_io_api_core_v1.PullPolicy(dAtA[iNdEx:postIndex]) + s := KRB5AuthType(dAtA[iNdEx:postIndex]) + m.AuthType = &s iNdEx = postIndex - case 3: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PasswordSecret", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14028,16 +16102,16 @@ func (m *ContainerTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecurityContext == nil { - m.SecurityContext = &v1.SecurityContext{} + if m.PasswordSecret == nil { + m.PasswordSecret = &v1.SecretKeySelector{} } - if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.PasswordSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KeytabSecret", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14064,14 +16138,16 @@ func (m *ContainerTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Env = append(m.Env, v1.EnvVar{}) - if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.KeytabSecret == nil { + m.KeytabSecret = &v1.SecretKeySelector{} + } + if err := m.KeytabSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) + return fmt.Errorf("proto: wrong wireType = %d 
for field KerberosConfigSecret", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14098,8 +16174,10 @@ func (m *ContainerTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.EnvFrom = append(m.EnvFrom, v1.EnvFromSource{}) - if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.KerberosConfigSecret == nil { + m.KerberosConfigSecret = &v1.SecretKeySelector{} + } + if err := m.KerberosConfigSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14124,7 +16202,7 @@ func (m *ContainerTemplate) Unmarshal(dAtA []byte) error { } return nil } -func (m *DaemonTemplate) Unmarshal(dAtA []byte) error { +func (m *GeneratorSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14147,15 +16225,35 @@ func (m *DaemonTemplate) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DaemonTemplate: wiretype end group for non-group") + return fmt.Errorf("proto: GeneratorSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GeneratorSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RPU", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RPU = &v + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AbstractPodTemplate", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ 
-14182,13 +16280,16 @@ func (m *DaemonTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.AbstractPodTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Duration == nil { + m.Duration = &v11.Duration{} + } + if err := m.Duration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MsgSize", wireType) } var v int32 for shift := uint(0); ; shift += 7 { @@ -14205,10 +16306,50 @@ func (m *DaemonTemplate) Unmarshal(dAtA []byte) error { break } } - m.Replicas = &v - case 3: + m.MsgSize = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.KeyCount = &v + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = &v + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerTemplate", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Jitter", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14235,18 +16376,18 @@ func (m *DaemonTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ContainerTemplate == nil { - m.ContainerTemplate = &ContainerTemplate{} + if m.Jitter == nil { + 
m.Jitter = &v11.Duration{} } - if err := m.ContainerTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Jitter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitContainerTemplate", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ValueBlob", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14256,27 +16397,24 @@ func (m *DaemonTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.InitContainerTemplate == nil { - m.InitContainerTemplate = &ContainerTemplate{} + return ErrInvalidLengthGenerated } - if err := m.InitContainerTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if postIndex > l { + return io.ErrUnexpectedEOF } + s := string(dAtA[iNdEx:postIndex]) + m.ValueBlob = &s iNdEx = postIndex default: iNdEx = preIndex @@ -14299,7 +16437,7 @@ func (m *DaemonTemplate) Unmarshal(dAtA []byte) error { } return nil } -func (m *Edge) Unmarshal(dAtA []byte) error { +func (m *GetDaemonDeploymentReq) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14322,15 +16460,15 @@ func (m *Edge) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Edge: wiretype end group for non-group") + return fmt.Errorf("proto: GetDaemonDeploymentReq: wiretype end group for non-group") } if fieldNum <= 0 { - return 
fmt.Errorf("proto: Edge: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetDaemonDeploymentReq: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ISBSvcType", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14358,11 +16496,11 @@ func (m *Edge) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.From = string(dAtA[iNdEx:postIndex]) + m.ISBSvcType = ISBSvcType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14390,11 +16528,43 @@ func (m *Edge) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.To = string(dAtA[iNdEx:postIndex]) + m.Image = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PullPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PullPolicy = k8s_io_api_core_v1.PullPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Env", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14421,18 +16591,16 @@ func (m *Edge) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Conditions == nil { - m.Conditions = &ForwardConditions{} - } - if err := m.Conditions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Env = append(m.Env, v1.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OnFull", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DefaultResources", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14442,24 +16610,24 @@ func (m *Edge) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := BufferFullWritingStrategy(dAtA[iNdEx:postIndex]) - m.OnFull = &s + if err := m.DefaultResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -14482,7 +16650,7 @@ func (m *Edge) Unmarshal(dAtA []byte) error { } return nil } -func (m *FixedWindow) Unmarshal(dAtA []byte) error { +func (m *GetJetStreamServiceSpecReq) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14505,15 +16673,15 @@ func (m *FixedWindow) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: FixedWindow: wiretype end group for non-group") + return 
fmt.Errorf("proto: GetJetStreamServiceSpecReq: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: FixedWindow: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetJetStreamServiceSpecReq: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14540,88 +16708,109 @@ func (m *FixedWindow) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Length == nil { - m.Length = &v11.Duration{} - } - if err := m.Length.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Streaming", wireType) + if m.Labels == nil { + m.Labels = make(map[string]string) } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey 
< 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - m.Streaming = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ForwardConditions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } 
- b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ForwardConditions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ForwardConditions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterPort", wireType) } - var msglen int + m.ClusterPort = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14631,28 +16820,68 @@ func (m *ForwardConditions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.ClusterPort |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientPort", wireType) } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated + m.ClientPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ClientPort |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - if postIndex > l { - return io.ErrUnexpectedEOF + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MonitorPort", wireType) } - if m.Tags == nil { - m.Tags = &TagConditions{} + m.MonitorPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MonitorPort |= int32(b&0x7F) << shift + if b < 0x80 { + break 
+ } } - if err := m.Tags.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricsPort", wireType) + } + m.MetricsPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MetricsPort |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -14674,7 +16903,7 @@ func (m *ForwardConditions) Unmarshal(dAtA []byte) error { } return nil } -func (m *Function) Unmarshal(dAtA []byte) error { +func (m *GetJetStreamStatefulSetSpecReq) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14697,15 +16926,15 @@ func (m *Function) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Function: wiretype end group for non-group") + return fmt.Errorf("proto: GetJetStreamStatefulSetSpecReq: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Function: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetJetStreamStatefulSetSpecReq: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14733,43 +16962,11 @@ func (m *Function) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.ServiceName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) - } - var stringLen uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KWArgs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14796,8 +16993,8 @@ func (m *Function) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.KWArgs == nil { - m.KWArgs = make(map[string]string) + if m.Labels == nil { + m.Labels = make(map[string]string) } var mapkey string var mapvalue string @@ -14892,63 +17089,128 @@ func (m *Function) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.KWArgs[mapkey] = mapvalue + m.Labels[mapkey] = mapvalue iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NatsImage", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + 
if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GSSAPI) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + m.NatsImage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricsExporterImage", wireType) } - if iNdEx >= l { + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.MetricsExporterImage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigReloaderImage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfigReloaderImage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + 
return fmt.Errorf("proto: wrong wireType = %d for field ClusterPort", wireType) + } + m.ClusterPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ClusterPort |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GSSAPI: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GSSAPI: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientPort", wireType) } - var stringLen uint64 + m.ClientPort = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14958,27 +17220,52 @@ func (m *GSSAPI) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.ClientPort |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MonitorPort", wireType) } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + m.MonitorPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MonitorPort |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - if postIndex > l { - return io.ErrUnexpectedEOF + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricsPort", wireType) } - m.ServiceName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex 
- case 2: + m.MetricsPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MetricsPort |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Realm", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ServerAuthSecretName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15006,13 +17293,13 @@ func (m *GSSAPI) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Realm = string(dAtA[iNdEx:postIndex]) + m.ServerAuthSecretName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UsernameSecret", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ServerEncryptionSecretName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15022,31 +17309,27 @@ func (m *GSSAPI) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.UsernameSecret == nil { - m.UsernameSecret = &v1.SecretKeySelector{} - } - if err := m.UsernameSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ServerEncryptionSecretName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthType", wireType) + return fmt.Errorf("proto: 
wrong wireType = %d for field ConfigMapName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15074,14 +17357,13 @@ func (m *GSSAPI) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := KRB5AuthType(dAtA[iNdEx:postIndex]) - m.AuthType = &s + m.ConfigMapName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PasswordSecret", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PvcNameIfNeeded", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15091,33 +17373,29 @@ func (m *GSSAPI) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.PasswordSecret == nil { - m.PasswordSecret = &v1.SecretKeySelector{} - } - if err := m.PasswordSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.PvcNameIfNeeded = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KeytabSecret", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartCommand", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15127,31 +17405,27 @@ func (m *GSSAPI) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := 
int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.KeytabSecret == nil { - m.KeytabSecret = &v1.SecretKeySelector{} - } - if err := m.KeytabSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.StartCommand = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 7: + case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KerberosConfigSecret", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DefaultResources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15178,10 +17452,7 @@ func (m *GSSAPI) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.KerberosConfigSecret == nil { - m.KerberosConfigSecret = &v1.SecretKeySelector{} - } - if err := m.KerberosConfigSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.DefaultResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -15206,7 +17477,7 @@ func (m *GSSAPI) Unmarshal(dAtA []byte) error { } return nil } -func (m *GeneratorSource) Unmarshal(dAtA []byte) error { +func (m *GetMonoVertexDaemonDeploymentReq) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15229,37 +17500,17 @@ func (m *GeneratorSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GeneratorSource: wiretype end group for non-group") + return fmt.Errorf("proto: GetMonoVertexDaemonDeploymentReq: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GeneratorSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetMonoVertexDaemonDeploymentReq: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 
1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RPU", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RPU = &v - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15269,33 +17520,29 @@ func (m *GeneratorSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Duration == nil { - m.Duration = &v11.Duration{} - } - if err := m.Duration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Image = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MsgSize", wireType) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PullPolicy", wireType) } - var v int32 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15305,55 +17552,27 @@ func (m *GeneratorSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.MsgSize = &v - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong 
wireType = %d for field KeyCount", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - m.KeyCount = &v - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + if postIndex > l { + return io.ErrUnexpectedEOF } - m.Value = &v - case 6: + m.PullPolicy = k8s_io_api_core_v1.PullPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Jitter", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15380,18 +17599,16 @@ func (m *GeneratorSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Jitter == nil { - m.Jitter = &v11.Duration{} - } - if err := m.Jitter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Env = append(m.Env, v1.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 7: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValueBlob", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DefaultResources", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ 
-15401,24 +17618,24 @@ func (m *GeneratorSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.ValueBlob = &s + if err := m.DefaultResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -15441,7 +17658,7 @@ func (m *GeneratorSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *GetDaemonDeploymentReq) Unmarshal(dAtA []byte) error { +func (m *GetMonoVertexPodSpecReq) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15464,15 +17681,15 @@ func (m *GetDaemonDeploymentReq) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetDaemonDeploymentReq: wiretype end group for non-group") + return fmt.Errorf("proto: GetMonoVertexPodSpecReq: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetDaemonDeploymentReq: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetMonoVertexPodSpecReq: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ISBSvcType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15500,11 +17717,11 @@ func (m *GetDaemonDeploymentReq) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ISBSvcType = ISBSvcType(dAtA[iNdEx:postIndex]) + m.Image = 
string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PullPolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15532,13 +17749,13 @@ func (m *GetDaemonDeploymentReq) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Image = string(dAtA[iNdEx:postIndex]) + m.PullPolicy = k8s_io_api_core_v1.PullPolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PullPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15548,29 +17765,241 @@ func (m *GetDaemonDeploymentReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, v1.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DefaultResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetRedisServiceSpecReq) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetRedisServiceSpecReq: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetRedisServiceSpecReq: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.PullPolicy = k8s_io_api_core_v1.PullPolicy(dAtA[iNdEx:postIndex]) + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var 
mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > 
postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RedisContainerPort", wireType) } - var msglen int + m.RedisContainerPort = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15580,31 +18009,16 @@ func (m *GetDaemonDeploymentReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.RedisContainerPort |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Env = append(m.Env, v1.EnvVar{}) - if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultResources", wireType) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SentinelContainerPort", wireType) } - var msglen int + m.SentinelContainerPort = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15614,25 +18028,11 @@ func (m *GetDaemonDeploymentReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.SentinelContainerPort |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.DefaultResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex 
skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -15654,7 +18054,7 @@ func (m *GetDaemonDeploymentReq) Unmarshal(dAtA []byte) error { } return nil } -func (m *GetJetStreamServiceSpecReq) Unmarshal(dAtA []byte) error { +func (m *GetRedisStatefulSetSpecReq) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15677,13 +18077,45 @@ func (m *GetJetStreamServiceSpecReq) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetJetStreamServiceSpecReq: wiretype end group for non-group") + return fmt.Errorf("proto: GetRedisStatefulSetSpecReq: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetJetStreamServiceSpecReq: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetRedisStatefulSetSpecReq: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) } @@ -15810,11 +18242,177 @@ func (m *GetJetStreamServiceSpecReq) Unmarshal(dAtA []byte) error { } m.Labels[mapkey] = mapvalue iNdEx = postIndex - case 2: + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RedisImage", wireType) + } 
+ var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RedisImage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SentinelImage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SentinelImage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricsExporterImage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricsExporterImage = string(dAtA[iNdEx:postIndex]) + iNdEx = 
postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitContainerImage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InitContainerImage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RedisContainerPort", wireType) + } + m.RedisContainerPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RedisContainerPort |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SentinelContainerPort", wireType) + } + m.SentinelContainerPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SentinelContainerPort |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterPort", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RedisMetricsContainerPort", wireType) } - m.ClusterPort = 0 + m.RedisMetricsContainerPort = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15824,16 +18422,16 @@ func (m *GetJetStreamServiceSpecReq) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - m.ClusterPort |= int32(b&0x7F) << shift + m.RedisMetricsContainerPort |= int32(b&0x7F) << shift if b < 0x80 { break } } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientPort", wireType) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CredentialSecretName", wireType) } - m.ClientPort = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15843,16 +18441,29 @@ func (m *GetJetStreamServiceSpecReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ClientPort |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 4: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CredentialSecretName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MonitorPort", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLSEnabled", wireType) } - m.MonitorPort = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15862,16 +18473,17 @@ func (m *GetJetStreamServiceSpecReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MonitorPort |= int32(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricsPort", wireType) + m.TLSEnabled = bool(v != 0) + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PvcNameIfNeeded", wireType) } - m.MetricsPort = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15881,64 
+18493,59 @@ func (m *GetJetStreamServiceSpecReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MetricsPort |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - if (skippy < 0) || (iNdEx+skippy) < 0 { + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetJetStreamStatefulSetSpecReq) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + m.PvcNameIfNeeded = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfConfigMapName", wireType) } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetJetStreamStatefulSetSpecReq: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetJetStreamStatefulSetSpecReq: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - 
case 1: + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfConfigMapName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ScriptsConfigMapName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15966,13 +18573,13 @@ func (m *GetJetStreamStatefulSetSpecReq) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ServiceName = string(dAtA[iNdEx:postIndex]) + m.ScriptsConfigMapName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HealthConfigMapName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15982,122 +18589,110 @@ func (m *GetJetStreamStatefulSetSpecReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Labels == nil { - m.Labels = make(map[string]string) + m.HealthConfigMapName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultResources", wireType) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift 
+= 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - 
return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break } } - m.Labels[mapkey] = mapvalue + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DefaultResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSideInputDeploymentReq) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSideInputDeploymentReq: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSideInputDeploymentReq: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NatsImage", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ISBSvcType", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -16125,11 +18720,11 @@ func (m *GetJetStreamStatefulSetSpecReq) Unmarshal(dAtA []byte) error { if postIndex > l { return 
io.ErrUnexpectedEOF } - m.NatsImage = string(dAtA[iNdEx:postIndex]) + m.ISBSvcType = ISBSvcType(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricsExporterImage", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -16157,11 +18752,11 @@ func (m *GetJetStreamStatefulSetSpecReq) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.MetricsExporterImage = string(dAtA[iNdEx:postIndex]) + m.Image = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigReloaderImage", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PullPolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -16189,13 +18784,13 @@ func (m *GetJetStreamStatefulSetSpecReq) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ConfigReloaderImage = string(dAtA[iNdEx:postIndex]) + m.PullPolicy = k8s_io_api_core_v1.PullPolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterPort", wireType) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) } - m.ClusterPort = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -16205,16 +18800,31 @@ func (m *GetJetStreamStatefulSetSpecReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ClusterPort |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientPort", wireType) + if msglen < 0 { + return ErrInvalidLengthGenerated } - m.ClientPort = 0 + postIndex := iNdEx 
+ msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, v1.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultResources", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -16224,16 +18834,80 @@ func (m *GetJetStreamStatefulSetSpecReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ClientPort |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MonitorPort", wireType) + if msglen < 0 { + return ErrInvalidLengthGenerated } - m.MonitorPort = 0 + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DefaultResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetVertexPodSpecReq) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: GetVertexPodSpecReq: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetVertexPodSpecReq: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ISBSvcType", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -16243,16 +18917,29 @@ func (m *GetJetStreamStatefulSetSpecReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MonitorPort |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricsPort", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - m.MetricsPort = 0 + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ISBSvcType = ISBSvcType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -16262,14 +18949,27 @@ func (m *GetJetStreamStatefulSetSpecReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MetricsPort |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 10: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
ServerAuthSecretName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PullPolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -16297,13 +18997,13 @@ func (m *GetJetStreamStatefulSetSpecReq) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ServerAuthSecretName = string(dAtA[iNdEx:postIndex]) + m.PullPolicy = k8s_io_api_core_v1.PullPolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 11: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServerEncryptionSecretName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -16313,27 +19013,29 @@ func (m *GetJetStreamStatefulSetSpecReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ServerEncryptionSecretName = string(dAtA[iNdEx:postIndex]) + m.Env = append(m.Env, v1.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 12: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SideInputsStoreName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -16361,11 +19063,11 @@ func (m *GetJetStreamStatefulSetSpecReq) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ConfigMapName = string(dAtA[iNdEx:postIndex]) + 
m.SideInputsStoreName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 13: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PvcNameIfNeeded", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ServingSourceStreamName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -16393,13 +19095,13 @@ func (m *GetJetStreamStatefulSetSpecReq) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.PvcNameIfNeeded = string(dAtA[iNdEx:postIndex]) + m.ServingSourceStreamName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 14: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartCommand", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PipelineSpec", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -16409,25 +19111,26 @@ func (m *GetJetStreamStatefulSetSpecReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.StartCommand = string(dAtA[iNdEx:postIndex]) + if err := m.PipelineSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 15: + case 8: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field DefaultResources", wireType) } @@ -16481,7 +19184,7 @@ func (m *GetJetStreamStatefulSetSpecReq) Unmarshal(dAtA []byte) error { } return nil } -func (m *GetRedisServiceSpecReq) Unmarshal(dAtA []byte) error { +func (m *GroupBy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 
for iNdEx < l { @@ -16504,15 +19207,15 @@ func (m *GetRedisServiceSpecReq) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetRedisServiceSpecReq: wiretype end group for non-group") + return fmt.Errorf("proto: GroupBy: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetRedisServiceSpecReq: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GroupBy: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Window", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -16539,109 +19242,157 @@ func (m *GetRedisServiceSpecReq) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Labels == nil { - m.Labels = make(map[string]string) + if err := m.Window.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - 
} - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Keyed = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedLateness", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } 
+ postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AllowedLateness == nil { + m.AllowedLateness = &v11.Duration{} + } + if err := m.AllowedLateness.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Storage == nil { + m.Storage = &PBQStorage{} + } + if err := m.Storage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RedisContainerPort", wireType) + } + fieldNum := int32(wire >> 3) + wireType 
:= int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Auth", wireType) } - m.RedisContainerPort = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -16651,16 +19402,33 @@ func (m *GetRedisServiceSpecReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.RedisContainerPort |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 3: + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Auth == nil { + m.Auth = &Authorization{} + } + if err := m.Auth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SentinelContainerPort", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) } - m.SentinelContainerPort = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -16670,11 +19438,12 @@ func (m *GetRedisServiceSpecReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.SentinelContainerPort |= int32(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } + m.Service = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -16696,7 +19465,7 @@ func (m *GetRedisServiceSpecReq) Unmarshal(dAtA []byte) error { } return nil } -func (m *GetRedisStatefulSetSpecReq) Unmarshal(dAtA []byte) error { +func (m *IdleSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ 
-16719,17 +19488,17 @@ func (m *GetRedisStatefulSetSpecReq) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetRedisStatefulSetSpecReq: wiretype end group for non-group") + return fmt.Errorf("proto: IdleSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetRedisStatefulSetSpecReq: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IdleSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Threshold", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -16739,27 +19508,31 @@ func (m *GetRedisStatefulSetSpecReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ServiceName = string(dAtA[iNdEx:postIndex]) + if m.Threshold == nil { + m.Threshold = &v11.Duration{} + } + if err := m.Threshold.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StepInterval", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -16786,109 +19559,18 @@ func (m *GetRedisStatefulSetSpecReq) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Labels == nil { 
- m.Labels = make(map[string]string) + if m.StepInterval == nil { + m.StepInterval = &v11.Duration{} } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil 
{ - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.StepInterval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Labels[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RedisImage", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IncrementBy", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -16898,61 +19580,83 @@ func (m *GetRedisStatefulSetSpecReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.RedisImage = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SentinelImage", wireType) + if m.IncrementBy == nil { + m.IncrementBy = &v11.Duration{} } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.IncrementBy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if 
postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.SentinelImage = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InterStepBufferService) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InterStepBufferService: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InterStepBufferService: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricsExporterImage", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -16962,29 +19666,30 @@ func (m *GetRedisStatefulSetSpecReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.MetricsExporterImage = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err 
!= nil { + return err + } iNdEx = postIndex - case 6: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitContainerImage", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -16994,86 +19699,30 @@ func (m *GetRedisStatefulSetSpecReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.InitContainerImage = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RedisContainerPort", wireType) - } - m.RedisContainerPort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RedisContainerPort |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SentinelContainerPort", wireType) - } - m.SentinelContainerPort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SentinelContainerPort |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RedisMetricsContainerPort", wireType) - } - m.RedisMetricsContainerPort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RedisMetricsContainerPort |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - case 10: + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CredentialSecretName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17083,49 +19732,80 @@ func (m *GetRedisStatefulSetSpecReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.CredentialSecretName = string(dAtA[iNdEx:postIndex]) + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TLSEnabled", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated } - m.TLSEnabled = bool(v != 0) - case 12: + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *InterStepBufferServiceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InterStepBufferServiceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InterStepBufferServiceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PvcNameIfNeeded", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17135,29 +19815,30 @@ func (m *GetRedisStatefulSetSpecReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.PvcNameIfNeeded = string(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 13: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfConfigMapName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := 
uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17167,61 +19848,81 @@ func (m *GetRedisStatefulSetSpecReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ConfConfigMapName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ScriptsConfigMapName", wireType) + m.Items = append(m.Items, InterStepBufferService{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InterStepBufferServiceSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postIndex > l { + if iNdEx >= l { return 
io.ErrUnexpectedEOF } - m.ScriptsConfigMapName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 15: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InterStepBufferServiceSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InterStepBufferServiceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HealthConfigMapName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Redis", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17231,27 +19932,31 @@ func (m *GetRedisStatefulSetSpecReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.HealthConfigMapName = string(dAtA[iNdEx:postIndex]) + if m.Redis == nil { + m.Redis = &RedisBufferService{} + } + if err := m.Redis.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 16: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultResources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field JetStream", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17278,7 +19983,10 @@ func (m *GetRedisStatefulSetSpecReq) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := 
m.DefaultResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.JetStream == nil { + m.JetStream = &JetStreamBufferService{} + } + if err := m.JetStream.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -17303,7 +20011,7 @@ func (m *GetRedisStatefulSetSpecReq) Unmarshal(dAtA []byte) error { } return nil } -func (m *GetSideInputDeploymentReq) Unmarshal(dAtA []byte) error { +func (m *InterStepBufferServiceStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17326,17 +20034,17 @@ func (m *GetSideInputDeploymentReq) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSideInputDeploymentReq: wiretype end group for non-group") + return fmt.Errorf("proto: InterStepBufferServiceStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSideInputDeploymentReq: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: InterStepBufferServiceStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ISBSvcType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17346,27 +20054,28 @@ func (m *GetSideInputDeploymentReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ISBSvcType = ISBSvcType(dAtA[iNdEx:postIndex]) + if err := 
m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17394,11 +20103,11 @@ func (m *GetSideInputDeploymentReq) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Image = string(dAtA[iNdEx:postIndex]) + m.Phase = ISBSvcPhase(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PullPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17426,11 +20135,11 @@ func (m *GetSideInputDeploymentReq) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.PullPolicy = k8s_io_api_core_v1.PullPolicy(dAtA[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17457,16 +20166,15 @@ func (m *GetSideInputDeploymentReq) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Env = append(m.Env, v1.EnvVar{}) - if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultResources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 
ErrIntOverflowGenerated @@ -17476,25 +20184,43 @@ func (m *GetSideInputDeploymentReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.DefaultResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Type = ISBSvcType(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -17516,7 +20242,7 @@ func (m *GetSideInputDeploymentReq) Unmarshal(dAtA []byte) error { } return nil } -func (m *GetVertexPodSpecReq) Unmarshal(dAtA []byte) error { +func (m *JetStreamBufferService) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17539,15 +20265,15 @@ func (m *GetVertexPodSpecReq) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetVertexPodSpecReq: wiretype end group for non-group") + return fmt.Errorf("proto: JetStreamBufferService: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetVertexPodSpecReq: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: JetStreamBufferService: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { 
case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ISBSvcType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17575,13 +20301,33 @@ func (m *GetVertexPodSpecReq) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ISBSvcType = ISBSvcType(dAtA[iNdEx:postIndex]) + m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContainerTemplate", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17591,29 +20337,33 @@ func (m *GetVertexPodSpecReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Image = string(dAtA[iNdEx:postIndex]) + if m.ContainerTemplate == nil { + m.ContainerTemplate = &ContainerTemplate{} + } + if err := m.ContainerTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field PullPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReloaderContainerTemplate", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17623,27 +20373,103 @@ func (m *GetVertexPodSpecReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ReloaderContainerTemplate == nil { + m.ReloaderContainerTemplate = &ContainerTemplate{} + } + if err := m.ReloaderContainerTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricsContainerTemplate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetricsContainerTemplate == nil { + m.MetricsContainerTemplate = &ContainerTemplate{} + } + if err := m.MetricsContainerTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Persistence", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.PullPolicy = k8s_io_api_core_v1.PullPolicy(dAtA[iNdEx:postIndex]) + if m.Persistence == nil { + m.Persistence = &PersistenceStrategy{} + } + if err := m.Persistence.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AbstractPodTemplate", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17670,14 +20496,13 @@ func (m *GetVertexPodSpecReq) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Env = append(m.Env, v1.EnvVar{}) - if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.AbstractPodTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SideInputsStoreName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Settings", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17705,11 +20530,12 @@ func (m *GetVertexPodSpecReq) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SideInputsStoreName = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.Settings = &s iNdEx = postIndex - case 6: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServingSourceStreamName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartArgs", wireType) } var stringLen 
uint64 for shift := uint(0); ; shift += 7 { @@ -17737,13 +20563,13 @@ func (m *GetVertexPodSpecReq) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ServingSourceStreamName = string(dAtA[iNdEx:postIndex]) + m.StartArgs = append(m.StartArgs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 7: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PipelineSpec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BufferConfig", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17753,30 +20579,30 @@ func (m *GetVertexPodSpecReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.PipelineSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.BufferConfig = &s iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultResources", wireType) + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Encryption", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17786,25 +20612,32 @@ func (m *GetVertexPodSpecReq) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return 
ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + m.Encryption = bool(v != 0) + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) } - if err := m.DefaultResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex + m.TLS = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -17826,7 +20659,7 @@ func (m *GetVertexPodSpecReq) Unmarshal(dAtA []byte) error { } return nil } -func (m *GroupBy) Unmarshal(dAtA []byte) error { +func (m *JetStreamConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17849,17 +20682,17 @@ func (m *GroupBy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GroupBy: wiretype end group for non-group") + return fmt.Errorf("proto: JetStreamConfig: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GroupBy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: JetStreamConfig: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Window", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17869,84 +20702,27 @@ func (m *GroupBy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if 
intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Window.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.URL = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyed", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Keyed = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedLateness", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AllowedLateness == nil { - m.AllowedLateness = &v11.Duration{} - } - if err := m.AllowedLateness.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Auth", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17973,68 +20749,18 @@ func (m *GroupBy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Storage == nil { - m.Storage = &PBQStorage{} + if m.Auth == nil { + m.Auth = &NatsAuth{} } - if err := 
m.Storage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Auth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auth", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StreamConfig", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18044,31 +20770,27 @@ func (m *HTTPSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Auth == nil { - 
m.Auth = &Authorization{} - } - if err := m.Auth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.StreamConfig = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLSEnabled", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -18085,7 +20807,7 @@ func (m *HTTPSource) Unmarshal(dAtA []byte) error { break } } - m.Service = bool(v != 0) + m.TLSEnabled = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -18107,7 +20829,7 @@ func (m *HTTPSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *IdleSource) Unmarshal(dAtA []byte) error { +func (m *JetStreamSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18130,17 +20852,17 @@ func (m *IdleSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IdleSource: wiretype end group for non-group") + return fmt.Errorf("proto: JetStreamSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IdleSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: JetStreamSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Threshold", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18150,31 +20872,59 @@ func (m *IdleSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { 
return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Threshold == nil { - m.Threshold = &v11.Duration{} + m.URL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType) } - if err := m.Threshold.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.Stream = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StepInterval", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18201,16 +20951,16 @@ func (m *IdleSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.StepInterval == nil { - m.StepInterval = &v11.Duration{} + if m.TLS == nil { + m.TLS = &TLS{} } - if err := m.StepInterval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IncrementBy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Auth", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ 
-18237,10 +20987,10 @@ func (m *IdleSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.IncrementBy == nil { - m.IncrementBy = &v11.Duration{} + if m.Auth == nil { + m.Auth = &NatsAuth{} } - if err := m.IncrementBy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Auth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -18265,7 +21015,7 @@ func (m *IdleSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *InterStepBufferService) Unmarshal(dAtA []byte) error { +func (m *JobTemplate) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18288,15 +21038,15 @@ func (m *InterStepBufferService) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: InterStepBufferService: wiretype end group for non-group") + return fmt.Errorf("proto: JobTemplate: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: InterStepBufferService: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: JobTemplate: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AbstractPodTemplate", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18323,13 +21073,13 @@ func (m *InterStepBufferService) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.AbstractPodTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContainerTemplate", wireType) } var msglen int for 
shift := uint(0); ; shift += 7 { @@ -18356,15 +21106,18 @@ func (m *InterStepBufferService) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ContainerTemplate == nil { + m.ContainerTemplate = &ContainerTemplate{} + } + if err := m.ContainerTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTLSecondsAfterFinished", wireType) } - var msglen int + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18374,25 +21127,32 @@ func (m *InterStepBufferService) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + m.TTLSecondsAfterFinished = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BackoffLimit", wireType) } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex + m.BackoffLimit = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -18414,7 +21174,7 @@ func (m *InterStepBufferService) Unmarshal(dAtA []byte) error { } return nil } -func (m *InterStepBufferServiceList) Unmarshal(dAtA []byte) error { +func (m *KafkaSink) Unmarshal(dAtA []byte) error { l := len(dAtA) 
iNdEx := 0 for iNdEx < l { @@ -18437,17 +21197,17 @@ func (m *InterStepBufferServiceList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: InterStepBufferServiceList: wiretype end group for non-group") + return fmt.Errorf("proto: KafkaSink: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: InterStepBufferServiceList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: KafkaSink: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Brokers", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18457,30 +21217,29 @@ func (m *InterStepBufferServiceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Brokers = append(m.Brokers, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18490,79 +21249,27 @@ func (m *InterStepBufferServiceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, InterStepBufferService{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Topic = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InterStepBufferServiceSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InterStepBufferServiceSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InterStepBufferServiceSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Redis", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18589,16 +21296,48 @@ func (m *InterStepBufferServiceSpec) Unmarshal(dAtA []byte) error { if 
postIndex > l { return io.ErrUnexpectedEOF } - if m.Redis == nil { - m.Redis = &RedisBufferService{} + if m.TLS == nil { + m.TLS = &TLS{} } - if err := m.Redis.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field JetStream", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SASL", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18625,10 +21364,10 @@ func (m *InterStepBufferServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.JetStream == nil { - m.JetStream = &JetStreamBufferService{} + if m.SASL == nil { + m.SASL = &SASL{} } - if err := m.JetStream.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.SASL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -18653,7 +21392,7 @@ func (m *InterStepBufferServiceSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *InterStepBufferServiceStatus) Unmarshal(dAtA []byte) error { +func (m *KafkaSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18676,17 +21415,17 @@ func (m *InterStepBufferServiceStatus) 
Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: InterStepBufferServiceStatus: wiretype end group for non-group") + return fmt.Errorf("proto: KafkaSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: InterStepBufferServiceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: KafkaSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Brokers", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18696,28 +21435,27 @@ func (m *InterStepBufferServiceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Brokers = append(m.Brokers, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18745,11 +21483,11 @@ func (m *InterStepBufferServiceStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Phase = ISBSvcPhase(dAtA[iNdEx:postIndex]) + m.Topic = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 
{ - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConsumerGroupName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18777,11 +21515,11 @@ func (m *InterStepBufferServiceStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.ConsumerGroupName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18808,13 +21546,16 @@ func (m *InterStepBufferServiceStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.TLS == nil { + m.TLS = &TLS{} + } + if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18842,13 +21583,13 @@ func (m *InterStepBufferServiceStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = ISBSvcType(dAtA[iNdEx:postIndex]) + m.Config = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SASL", wireType) } - m.ObservedGeneration = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18858,11 +21599,28 @@ func (m *InterStepBufferServiceStatus) Unmarshal(dAtA []byte) error { 
} b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SASL == nil { + m.SASL = &SASL{} + } + if err := m.SASL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -18884,7 +21642,7 @@ func (m *InterStepBufferServiceStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *JetStreamBufferService) Unmarshal(dAtA []byte) error { +func (m *Lifecycle) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18907,47 +21665,15 @@ func (m *JetStreamBufferService) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: JetStreamBufferService: wiretype end group for non-group") + return fmt.Errorf("proto: Lifecycle: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: JetStreamBufferService: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Lifecycle: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } 
- m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeleteGracePeriodSeconds", wireType) } var v int32 for shift := uint(0); ; shift += 7 { @@ -18964,12 +21690,12 @@ func (m *JetStreamBufferService) Unmarshal(dAtA []byte) error { break } } - m.Replicas = &v - case 3: + m.DeleteGracePeriodSeconds = &v + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerTemplate", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DesiredPhase", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18979,33 +21705,29 @@ func (m *JetStreamBufferService) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ContainerTemplate == nil { - m.ContainerTemplate = &ContainerTemplate{} - } - if err := m.ContainerTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.DesiredPhase = PipelinePhase(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReloaderContainerTemplate", wireType) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PauseGracePeriodSeconds", wireType) } - var msglen int + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19015,103 +21737,115 @@ func (m *JetStreamBufferService) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated + m.PauseGracePeriodSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.ReloaderContainerTemplate == nil { - m.ReloaderContainerTemplate = &ContainerTemplate{} - } - if err := m.ReloaderContainerTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricsContainerTemplate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Log) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - if m.MetricsContainerTemplate == nil { - m.MetricsContainerTemplate = &ContainerTemplate{} + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - if err := m.MetricsContainerTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 
0x7) + if wireType == 4 { + return fmt.Errorf("proto: Log: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Log: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Persistence", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + } + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.Persistence == nil { - m.Persistence = &PersistenceStrategy{} + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Metadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if err := m.Persistence.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex - case 7: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field AbstractPodTemplate", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19138,80 +21872,109 @@ func (m *JetStreamBufferService) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.AbstractPodTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Settings", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Settings = &s - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartArgs", wireType) + if m.Annotations == nil { + m.Annotations = make(map[string]string) } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + 
var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StartArgs = append(m.StartArgs, string(dAtA[iNdEx:postIndex])) + 
m.Annotations[mapkey] = mapvalue iNdEx = postIndex - case 10: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BufferConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19221,65 +21984,119 @@ func (m *JetStreamBufferService) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.BufferConfig = &s - iNdEx = postIndex - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Encryption", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Encryption = bool(v != 0) - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + if m.Labels == nil { + m.Labels = make(map[string]string) } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 
{ + break + } } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - m.TLS = bool(v != 0) + m.Labels[mapkey] = mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19301,7 +22118,7 @@ 
func (m *JetStreamBufferService) Unmarshal(dAtA []byte) error { } return nil } -func (m *JetStreamConfig) Unmarshal(dAtA []byte) error { +func (m *MonoVertex) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19324,17 +22141,17 @@ func (m *JetStreamConfig) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: JetStreamConfig: wiretype end group for non-group") + return fmt.Errorf("proto: MonoVertex: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: JetStreamConfig: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MonoVertex: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19344,27 +22161,28 @@ func (m *JetStreamConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.URL = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auth", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19391,18 +22209,15 @@ func (m *JetStreamConfig) 
Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Auth == nil { - m.Auth = &NatsAuth{} - } - if err := m.Auth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StreamConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19412,44 +22227,25 @@ func (m *JetStreamConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.StreamConfig = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TLSEnabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.TLSEnabled = bool(v != 0) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19459,116 +22255,52 @@ func (m *JetStreamConfig) Unmarshal(dAtA []byte) error { if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return 
io.ErrUnexpectedEOF - } - return nil -} -func (m *JetStreamSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JetStreamSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JetStreamSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.URL = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if 
postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Stream = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MonoVertexLimits) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - var msglen int + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MonoVertexLimits: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MonoVertexLimits: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadBatchSize", wireType) + } + var v uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19578,31 +22310,15 @@ func (m *JetStreamSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TLS == nil { - m.TLS = &TLS{} - } - if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: + m.ReadBatchSize = &v + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auth", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field 
ReadTimeout", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19629,10 +22345,10 @@ func (m *JetStreamSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Auth == nil { - m.Auth = &NatsAuth{} + if m.ReadTimeout == nil { + m.ReadTimeout = &v11.Duration{} } - if err := m.Auth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ReadTimeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -19657,7 +22373,7 @@ func (m *JetStreamSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *JobTemplate) Unmarshal(dAtA []byte) error { +func (m *MonoVertexList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19680,15 +22396,15 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: JobTemplate: wiretype end group for non-group") + return fmt.Errorf("proto: MonoVertexList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: JobTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MonoVertexList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AbstractPodTemplate", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19715,13 +22431,13 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.AbstractPodTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerTemplate", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field 
Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19748,53 +22464,11 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ContainerTemplate == nil { - m.ContainerTemplate = &ContainerTemplate{} - } - if err := m.ContainerTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, MonoVertex{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTLSecondsAfterFinished", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.TTLSecondsAfterFinished = &v - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BackoffLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.BackoffLimit = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19816,7 +22490,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } return nil } -func (m *KafkaSink) Unmarshal(dAtA []byte) error { +func (m *MonoVertexSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19839,17 +22513,17 @@ func (m *KafkaSink) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: KafkaSink: wiretype end group for non-group") + return fmt.Errorf("proto: MonoVertexSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: KafkaSink: 
illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MonoVertexSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Brokers", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) } - var stringLen uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19859,29 +22533,17 @@ func (m *KafkaSink) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Brokers = append(m.Brokers, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex + m.Replicas = &v case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19891,27 +22553,31 @@ func (m *KafkaSink) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Topic = string(dAtA[iNdEx:postIndex]) + if m.Source == nil { + m.Source = &Source{} + } + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } 
iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Sink", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19938,18 +22604,18 @@ func (m *KafkaSink) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TLS == nil { - m.TLS = &TLS{} + if m.Sink == nil { + m.Sink = &Sink{} } - if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Sink.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AbstractPodTemplate", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19959,27 +22625,28 @@ func (m *KafkaSink) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Config = string(dAtA[iNdEx:postIndex]) + if err := m.AbstractPodTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SASL", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContainerTemplate", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20006,68 +22673,18 @@ func (m *KafkaSink) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SASL == nil { - m.SASL = &SASL{} + if 
m.ContainerTemplate == nil { + m.ContainerTemplate = &ContainerTemplate{} } - if err := m.SASL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ContainerTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *KafkaSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: KafkaSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KafkaSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Brokers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20077,29 +22694,31 @@ func (m *KafkaSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { 
return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Brokers = append(m.Brokers, string(dAtA[iNdEx:postIndex])) + m.Volumes = append(m.Volumes, v1.Volume{}) + if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20109,29 +22728,33 @@ func (m *KafkaSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Topic = string(dAtA[iNdEx:postIndex]) + if m.Limits == nil { + m.Limits = &MonoVertexLimits{} + } + if err := m.Limits.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsumerGroupName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Scale", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20141,27 +22764,28 @@ func (m *KafkaSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + 
postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ConsumerGroupName = string(dAtA[iNdEx:postIndex]) + if err := m.Scale.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field InitContainers", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20188,18 +22812,16 @@ func (m *KafkaSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TLS == nil { - m.TLS = &TLS{} - } - if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.InitContainers = append(m.InitContainers, v1.Container{}) + if err := m.InitContainers[len(m.InitContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Sidecars", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20209,27 +22831,29 @@ func (m *KafkaSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Config = string(dAtA[iNdEx:postIndex]) + m.Sidecars = append(m.Sidecars, v1.Container{}) + if err := m.Sidecars[len(m.Sidecars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 6: + 
case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SASL", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DaemonTemplate", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20256,10 +22880,10 @@ func (m *KafkaSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SASL == nil { - m.SASL = &SASL{} + if m.DaemonTemplate == nil { + m.DaemonTemplate = &DaemonTemplate{} } - if err := m.SASL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.DaemonTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -20284,7 +22908,7 @@ func (m *KafkaSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *Lifecycle) Unmarshal(dAtA []byte) error { +func (m *MonoVertexStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20307,17 +22931,17 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Lifecycle: wiretype end group for non-group") + return fmt.Errorf("proto: MonoVertexStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Lifecycle: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MonoVertexStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DeleteGracePeriodSeconds", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var v int32 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20327,15 +22951,28 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.DeleteGracePeriodSeconds = &v + if 
msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredPhase", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20363,13 +23000,13 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DesiredPhase = PipelinePhase(dAtA[iNdEx:postIndex]) + m.Phase = MonoVertexPhase(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PauseGracePeriodSeconds", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) } - var v int32 + m.Replicas = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20379,115 +23016,110 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + m.Replicas |= uint32(b&0x7F) << shift if b < 0x80 { break } } - m.PauseGracePeriodSeconds = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) 
+ if intStringLen < 0 { + return ErrInvalidLengthGenerated } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Log) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.Selector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Log: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Log: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (skippy < 0) || (iNdEx+skippy) < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Metadata) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - 
for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Metadata: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdated", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20514,107 +23146,13 @@ func (m *Metadata) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Annotations == nil { - m.Annotations = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.LastUpdated.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Annotations[mapkey] = mapvalue iNdEx = postIndex - case 2: + case 8: if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastScaledAt", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20641,104 +23179,29 @@ func (m *Metadata) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Labels == nil { - m.Labels = make(map[string]string) + if err := m.LastScaledAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break } } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/pkg/apis/numaflow/v1alpha1/generated.proto b/pkg/apis/numaflow/v1alpha1/generated.proto index b3dd64f6b8..09b1af32c1 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.proto +++ b/pkg/apis/numaflow/v1alpha1/generated.proto @@ -478,6 +478,26 @@ message GetJetStreamStatefulSetSpecReq { optional k8s.io.api.core.v1.ResourceRequirements defaultResources = 15; } +message GetMonoVertexDaemonDeploymentReq { + optional string image = 1; + + optional string pullPolicy = 2; + + repeated k8s.io.api.core.v1.EnvVar env = 3; + + optional k8s.io.api.core.v1.ResourceRequirements defaultResources = 4; +} + +message GetMonoVertexPodSpecReq { + optional string image = 1; + + optional string pullPolicy = 2; + + repeated k8s.io.api.core.v1.EnvVar env = 3; + + optional k8s.io.api.core.v1.ResourceRequirements defaultResources = 4; +} + message GetRedisServiceSpecReq { map labels = 1; @@ -818,6 
+838,109 @@ message Metadata { map labels = 2; } +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=mvtx +// +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.reason` +// +kubebuilder:printcolumn:name="Message",type=string,JSONPath=`.status.message` +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true +message MonoVertex { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + optional MonoVertexSpec spec = 2; + + // +optional + optional MonoVertexStatus status = 3; +} + +message MonoVertexLimits { + // Read batch size from the source. + // +kubebuilder:default=500 + // +optional + optional uint64 readBatchSize = 1; + + // Read timeout duration from the source. + // +kubebuilder:default= "1s" + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration readTimeout = 2; +} + +// +kubebuilder:object:root=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message MonoVertexList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated MonoVertex items = 2; +} + +message MonoVertexSpec { + // +kubebuilder:default=1 + // +optional + optional int32 replicas = 1; + + optional Source source = 2; + + optional Sink sink = 3; + + // +optional + optional AbstractPodTemplate abstractPodTemplate = 4; + + // Container template for the main numa container. 
+ // +optional + optional ContainerTemplate containerTemplate = 5; + + // +optional + // +patchStrategy=merge + // +patchMergeKey=name + repeated k8s.io.api.core.v1.Volume volumes = 6; + + // Limits define the limitations such as buffer read batch size for all the vertices of a pipeline, will override pipeline level settings + // +optional + optional MonoVertexLimits limits = 7; + + // Settings for autoscaling + // +optional + optional Scale scale = 8; + + // List of customized init containers belonging to the pod. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + // +optional + repeated k8s.io.api.core.v1.Container initContainers = 9; + + // List of customized sidecar containers belonging to the pod. + // +optional + repeated k8s.io.api.core.v1.Container sidecars = 10; + + // Template for the daemon service deployment. + // +optional + optional DaemonTemplate daemonTemplate = 11; +} + +message MonoVertexStatus { + optional Status status = 1; + + optional string phase = 2; + + optional uint32 replicas = 3; + + optional string selector = 4; + + optional string reason = 5; + + optional string message = 6; + + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdated = 7; + + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaledAt = 8; + + optional int64 observedGeneration = 9; +} + message NativeRedis { // Redis version, such as "6.0.16" optional string version = 1; diff --git a/pkg/apis/numaflow/v1alpha1/get_spec_req.go b/pkg/apis/numaflow/v1alpha1/get_spec_req.go index cb0843f684..31575fda61 100644 --- a/pkg/apis/numaflow/v1alpha1/get_spec_req.go +++ b/pkg/apis/numaflow/v1alpha1/get_spec_req.go @@ -97,3 +97,17 @@ type GetSideInputDeploymentReq struct { Env []corev1.EnvVar `protobuf:"bytes,4,rep,name=env"` DefaultResources corev1.ResourceRequirements `protobuf:"bytes,5,opt,name=defaultResources"` } + +type GetMonoVertexDaemonDeploymentReq struct { + Image string `protobuf:"bytes,1,opt,name=image"` + PullPolicy 
corev1.PullPolicy `protobuf:"bytes,2,opt,name=pullPolicy,casttype=k8s.io/api/core/v1.PullPolicy"` + Env []corev1.EnvVar `protobuf:"bytes,3,rep,name=env"` + DefaultResources corev1.ResourceRequirements `protobuf:"bytes,4,opt,name=defaultResources"` +} + +type GetMonoVertexPodSpecReq struct { + Image string `protobuf:"bytes,1,opt,name=image"` + PullPolicy corev1.PullPolicy `protobuf:"bytes,2,opt,name=pullPolicy,casttype=k8s.io/api/core/v1.PullPolicy"` + Env []corev1.EnvVar `protobuf:"bytes,3,rep,name=env"` + DefaultResources corev1.ResourceRequirements `protobuf:"bytes,4,opt,name=defaultResources"` +} diff --git a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go new file mode 100644 index 0000000000..359056c14c --- /dev/null +++ b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go @@ -0,0 +1,526 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "os" + "time" + + appv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/ptr" +) + +// +kubebuilder:validation:Enum="";Running;Failed;Pausing;Paused;Deleting +type MonoVertexPhase string + +const ( + MonoVertexPhaseUnknown MonoVertexPhase = "" + MonoVertexPhaseRunning MonoVertexPhase = "Running" + MonoVertexPhaseFailed MonoVertexPhase = "Failed" + + // MonoVertexConditionDeployed has the status True when the MonoVertex + // has its sub resources created and deployed. + MonoVertexConditionDeployed ConditionType = "Deployed" + // MonoVertexConditionDaemonHealthy has the status True when the daemon service of the mono vertex is healthy. + MonoVertexConditionDaemonHealthy ConditionType = "DaemonHealthy" + // MonoVertexPodsHealthy has the status True when the pods of the mono vertex are healthy + MonoVertexPodsHealthy ConditionType = "PodsHealthy" +) + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=mvtx +// +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.reason` +// +kubebuilder:printcolumn:name="Message",type=string,JSONPath=`.status.message` +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true +type MonoVertex struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Spec MonoVertexSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + // +optional + Status MonoVertexStatus 
`json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +func (mv MonoVertex) GetReplicas() int { + if mv.Spec.Replicas == nil { + return 1 + } + return int(*mv.Spec.Replicas) +} + +func (mv MonoVertex) GetHeadlessServiceName() string { + return mv.Name + "-mv-headless" +} + +func (mv MonoVertex) GetServiceObjs() []*corev1.Service { + svcs := []*corev1.Service{mv.getServiceObj(mv.GetHeadlessServiceName(), true, MonoVertexMetricsPort, MonoVertexMetricsPortName)} + return svcs +} + +func (mv MonoVertex) getServiceObj(name string, headless bool, port int32, servicePortName string) *corev1.Service { + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: mv.Namespace, + Name: name, + OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(mv.GetObjectMeta(), MonoVertexGroupVersionKind)}, + Labels: map[string]string{ + KeyPartOf: Project, + KeyManagedBy: ControllerMonoVertex, + KeyComponent: ComponentMonoVertex, + KeyMonoVertexName: mv.Name, + }, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + {Port: port, TargetPort: intstr.FromInt32(port), Name: servicePortName}, + }, + Selector: map[string]string{ + KeyPartOf: Project, + KeyManagedBy: ControllerMonoVertex, + KeyComponent: ComponentMonoVertex, + KeyMonoVertexName: mv.Name, + }, + }, + } + if headless { + svc.Spec.ClusterIP = "None" + } + return svc +} + +func (mv MonoVertex) GetDaemonServiceName() string { + return fmt.Sprintf("%s-mv-daemon-svc", mv.Name) +} + +func (mv MonoVertex) GetDaemonDeploymentName() string { + return fmt.Sprintf("%s-mv-daemon", mv.Name) +} + +func (mv MonoVertex) GetDaemonServiceObj() *corev1.Service { + labels := map[string]string{ + KeyPartOf: Project, + KeyManagedBy: ControllerMonoVertex, + KeyComponent: ComponentMonoVertexDaemon, + KeyMonoVertexName: mv.Name, + } + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: mv.Namespace, + Name: mv.GetDaemonServiceName(), + OwnerReferences: []metav1.OwnerReference{ + 
*metav1.NewControllerRef(mv.GetObjectMeta(), MonoVertexGroupVersionKind), + }, + Labels: labels, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + {Name: "tcp", Port: MonoVertexDaemonServicePort, TargetPort: intstr.FromInt32(MonoVertexDaemonServicePort)}, + }, + Selector: labels, + }, + } +} + +func (mv MonoVertex) GetDaemonDeploymentObj(req GetMonoVertexDaemonDeploymentReq) (*appv1.Deployment, error) { + mvVtxCopyBytes, err := json.Marshal(mv.simpleCopy()) + if err != nil { + return nil, fmt.Errorf("failed to marshal mono vertex spec") + } + encodedMonoVtx := base64.StdEncoding.EncodeToString(mvVtxCopyBytes) + envVars := []corev1.EnvVar{ + {Name: EnvNamespace, ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"}}}, + {Name: EnvPod, ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"}}}, + {Name: EnvMonoVertexObject, Value: encodedMonoVtx}, + {Name: EnvGoDebug, Value: os.Getenv(EnvGoDebug)}, + } + envVars = append(envVars, req.Env...) 
+ c := corev1.Container{ + Ports: []corev1.ContainerPort{{ContainerPort: MonoVertexDaemonServicePort}}, + Name: CtrMain, + Image: req.Image, + ImagePullPolicy: req.PullPolicy, + Resources: req.DefaultResources, + Env: envVars, + Args: []string{"mvtx-daemon-server"}, + } + + c.ReadinessProbe = &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/readyz", + Port: intstr.FromInt32(MonoVertexDaemonServicePort), + Scheme: corev1.URISchemeHTTPS, + }, + }, + InitialDelaySeconds: 3, + PeriodSeconds: 3, + TimeoutSeconds: 1, + } + c.LivenessProbe = &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/livez", + Port: intstr.FromInt32(MonoVertexDaemonServicePort), + Scheme: corev1.URISchemeHTTPS, + }, + }, + InitialDelaySeconds: 30, + PeriodSeconds: 60, + TimeoutSeconds: 30, + } + + labels := map[string]string{ + KeyPartOf: Project, + KeyManagedBy: ControllerMonoVertex, + KeyComponent: ComponentMonoVertexDaemon, + KeyAppName: mv.GetDaemonDeploymentName(), + KeyMonoVertexName: mv.Name, + } + spec := appv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + Annotations: map[string]string{}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{c}, + }, + }, + } + if dt := mv.Spec.DaemonTemplate; dt != nil { + spec.Replicas = dt.Replicas + dt.AbstractPodTemplate.ApplyToPodTemplateSpec(&spec.Template) + if dt.ContainerTemplate != nil { + dt.ContainerTemplate.ApplyToNumaflowContainers(spec.Template.Spec.Containers) + } + } + return &appv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: mv.Namespace, + Name: mv.GetDaemonDeploymentName(), + Labels: labels, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(mv.GetObjectMeta(), MonoVertexGroupVersionKind), + }, + }, + Spec: spec, + }, nil +} + +// CommonEnvs returns the common envs for all mono 
vertex pod containers. +func (mv MonoVertex) commonEnvs() []corev1.EnvVar { + return []corev1.EnvVar{ + {Name: EnvNamespace, ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"}}}, + {Name: EnvPod, ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"}}}, + {Name: EnvReplica, ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.annotations['" + KeyReplica + "']"}}}, + {Name: EnvMonoVertexName, Value: mv.Name}, + } +} + +// SidecarEnvs returns the envs for sidecar containers. +func (mv MonoVertex) sidecarEnvs() []corev1.EnvVar { + return []corev1.EnvVar{ + {Name: EnvCPULimit, ValueFrom: &corev1.EnvVarSource{ + ResourceFieldRef: &corev1.ResourceFieldSelector{Resource: "limits.cpu"}}}, + {Name: EnvCPURequest, ValueFrom: &corev1.EnvVarSource{ + ResourceFieldRef: &corev1.ResourceFieldSelector{Resource: "requests.cpu"}}}, + {Name: EnvMemoryLimit, ValueFrom: &corev1.EnvVarSource{ + ResourceFieldRef: &corev1.ResourceFieldSelector{Resource: "limits.memory"}}}, + {Name: EnvMemoryRequest, ValueFrom: &corev1.EnvVarSource{ + ResourceFieldRef: &corev1.ResourceFieldSelector{Resource: "requests.memory"}}}, + } +} + +func (mv MonoVertex) simpleCopy() MonoVertex { + m := MonoVertex{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: mv.Namespace, + Name: mv.Name, + }, + Spec: mv.Spec.DeepCopyWithoutReplicas(), + } + if m.Spec.Limits == nil { + m.Spec.Limits = &MonoVertexLimits{} + } + if m.Spec.Limits.ReadBatchSize == nil { + m.Spec.Limits.ReadBatchSize = ptr.To[uint64](DefaultReadBatchSize) + } + if m.Spec.Limits.ReadTimeout == nil { + m.Spec.Limits.ReadTimeout = &metav1.Duration{Duration: DefaultReadTimeout} + } + // TODO: lifecycle + // mvVtxCopy.Spec.Lifecycle = Lifecycle{} + return m +} + +func (mv MonoVertex) GetPodSpec(req GetMonoVertexPodSpecReq) (*corev1.PodSpec, error) { + monoVtxBytes, err := json.Marshal(mv.simpleCopy()) + if err != nil { + return 
nil, errors.New("failed to marshal mono vertex spec") + } + encodedMonoVertexSpec := base64.StdEncoding.EncodeToString(monoVtxBytes) + envVars := []corev1.EnvVar{ + {Name: EnvMonoVertexObject, Value: encodedMonoVertexSpec}, + } + envVars = append(envVars, mv.commonEnvs()...) + envVars = append(envVars, req.Env...) + + varVolumeName := "var-run-numaflow" + volumes := []corev1.Volume{ + { + Name: varVolumeName, + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + }}, + }, + } + volumeMounts := []corev1.VolumeMount{{Name: varVolumeName, MountPath: PathVarRun}} + + containers := mv.Spec.buildContainers(getContainerReq{ + env: envVars, + image: req.Image, + imagePullPolicy: req.PullPolicy, + resources: req.DefaultResources, + volumeMounts: volumeMounts, + }) + + containers[0].ReadinessProbe = &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/readyz", + Port: intstr.FromInt32(MonoVertexMetricsPort), + Scheme: corev1.URISchemeHTTPS, + }, + }, + InitialDelaySeconds: 3, + PeriodSeconds: 3, + TimeoutSeconds: 1, + } + containers[0].LivenessProbe = &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/livez", + Port: intstr.FromInt32(MonoVertexMetricsPort), + Scheme: corev1.URISchemeHTTPS, + }, + }, + InitialDelaySeconds: 20, + PeriodSeconds: 60, + TimeoutSeconds: 30, + } + containers[0].Ports = []corev1.ContainerPort{ + {Name: MonoVertexMetricsPortName, ContainerPort: MonoVertexMetricsPort}, + } + + if len(containers) > 1 { // udf, udsink, udsource, or source vertex specifies a udtransformer + for i := 1; i < len(containers); i++ { + containers[i].Env = append(containers[i].Env, mv.commonEnvs()...) + containers[i].Env = append(containers[i].Env, mv.sidecarEnvs()...) 
+ } + } + + spec := &corev1.PodSpec{ + Subdomain: mv.GetHeadlessServiceName(), + Volumes: append(volumes, mv.Spec.Volumes...), + InitContainers: mv.Spec.InitContainers, + Containers: append(containers, mv.Spec.Sidecars...), + } + mv.Spec.AbstractPodTemplate.ApplyToPodSpec(spec) + if mv.Spec.ContainerTemplate != nil { + mv.Spec.ContainerTemplate.ApplyToNumaflowContainers(spec.Containers) + } + return spec, nil +} + +type MonoVertexSpec struct { + // +kubebuilder:default=1 + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + Source *Source `json:"source,omitempty" protobuf:"bytes,2,opt,name=source"` + Sink *Sink `json:"sink,omitempty" protobuf:"bytes,3,opt,name=sink"` + // +optional + AbstractPodTemplate `json:",inline" protobuf:"bytes,4,opt,name=abstractPodTemplate"` + // Container template for the main numa container. + // +optional + ContainerTemplate *ContainerTemplate `json:"containerTemplate,omitempty" protobuf:"bytes,5,opt,name=containerTemplate"` + // +optional + // +patchStrategy=merge + // +patchMergeKey=name + Volumes []corev1.Volume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,rep,name=volumes"` + // Limits define the limitations such as buffer read batch size for all the vertices of a pipeline, will override pipeline level settings + // +optional + Limits *MonoVertexLimits `json:"limits,omitempty" protobuf:"bytes,7,opt,name=limits"` + // Settings for autoscaling + // +optional + Scale Scale `json:"scale,omitempty" protobuf:"bytes,8,opt,name=scale"` + // List of customized init containers belonging to the pod. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + // +optional + InitContainers []corev1.Container `json:"initContainers,omitempty" protobuf:"bytes,9,rep,name=initContainers"` + // List of customized sidecar containers belonging to the pod. 
+ // +optional + Sidecars []corev1.Container `json:"sidecars,omitempty" protobuf:"bytes,10,rep,name=sidecars"` + // Template for the daemon service deployment. + // +optional + DaemonTemplate *DaemonTemplate `json:"daemonTemplate,omitempty" protobuf:"bytes,11,opt,name=daemonTemplate"` +} + +func (mvspec MonoVertexSpec) DeepCopyWithoutReplicas() MonoVertexSpec { + x := *mvspec.DeepCopy() + x.Replicas = ptr.To[int32](0) + return x +} + +func (mvspec MonoVertexSpec) buildContainers(req getContainerReq) []corev1.Container { + mainContainer := containerBuilder{}. + init(req).command("/bin/serve").build() // TODO: command + containers := []corev1.Container{mainContainer} + if mvspec.Source.UDSource != nil { // Only support UDSource for now. + containers = append(containers, mvspec.Source.getUDSourceContainer(req)) + } + if mvspec.Source.UDTransformer != nil { + containers = append(containers, mvspec.Source.getUDTransformerContainer(req)) + } + if mvspec.Sink.UDSink != nil { // Only support UDSink for now. + containers = append(containers, mvspec.Sink.getUDSinkContainer(req)) + } + // Fallback sink is not supported. + containers = append(containers, mvspec.Sidecars...) + return containers +} + +type MonoVertexLimits struct { + // Read batch size from the source. + // +kubebuilder:default=500 + // +optional + ReadBatchSize *uint64 `json:"readBatchSize,omitempty" protobuf:"varint,1,opt,name=readBatchSize"` + // Read timeout duration from the source. 
+ // +kubebuilder:default= "1s" + // +optional + ReadTimeout *metav1.Duration `json:"readTimeout,omitempty" protobuf:"bytes,2,opt,name=readTimeout"` +} + +func (mvl MonoVertexLimits) GetReadBatchSize() uint64 { + if mvl.ReadBatchSize == nil { + return DefaultReadBatchSize + } + return *mvl.ReadBatchSize +} + +func (mvl MonoVertexLimits) GetReadTimeout() time.Duration { + if mvl.ReadTimeout == nil { + return DefaultReadTimeout + } + return mvl.ReadTimeout.Duration +} + +type MonoVertexStatus struct { + Status `json:",inline" protobuf:"bytes,1,opt,name=status"` + Phase MonoVertexPhase `json:"phase,omitempty" protobuf:"bytes,2,opt,name=phase,casttype=MonoVertexPhase"` + Replicas uint32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"` + Selector string `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` + LastUpdated metav1.Time `json:"lastUpdated,omitempty" protobuf:"bytes,7,opt,name=lastUpdated"` + LastScaledAt metav1.Time `json:"lastScaledAt,omitempty" protobuf:"bytes,8,opt,name=lastScaledAt"` + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,9,opt,name=observedGeneration"` +} + +// SetObservedGeneration sets the Status ObservedGeneration +func (mvs *MonoVertexStatus) SetObservedGeneration(value int64) { + mvs.ObservedGeneration = value +} + +// InitConditions sets conditions to Unknown state. +func (mvs *MonoVertexStatus) InitConditions() { + mvs.InitializeConditions(MonoVertexConditionDeployed, MonoVertexConditionDaemonHealthy, MonoVertexPodsHealthy) +} + +// MarkDeployed set the MonoVertex has it's sub resources deployed. 
+func (mvs *MonoVertexStatus) MarkDeployed() { + mvs.MarkTrue(MonoVertexConditionDeployed) +} + +// MarkDeployFailed set the MonoVertex deployment failed +func (mvs *MonoVertexStatus) MarkDeployFailed(reason, message string) { + mvs.MarkFalse(MonoVertexConditionDeployed, reason, message) + mvs.MarkPhaseFailed(reason, message) +} + +// MarkDaemonHealthy set the daemon service of the mono vertex is healthy. +func (mvs *MonoVertexStatus) MarkDaemonHealthy() { + mvs.MarkTrue(MonoVertexConditionDaemonHealthy) +} + +// MarkDaemonUnHealthy set the daemon service of the mono vertex is unhealthy. +func (mvs *MonoVertexStatus) MarkDaemonUnHealthy(reason, message string) { + mvs.MarkFalse(MonoVertexConditionDaemonHealthy, reason, message) + mvs.Message = "Degraded: " + message +} + +// MarkPodHealthy marks the pod as healthy with the given reason and message. +func (mvs *MonoVertexStatus) MarkPodHealthy(reason, message string) { + mvs.MarkTrueWithReason(MonoVertexPodsHealthy, reason, message) +} + +// MarkPodNotHealthy marks the pod not healthy with the given reason and message. +func (mvs *MonoVertexStatus) MarkPodNotHealthy(reason, message string) { + mvs.MarkFalse(MonoVertexPodsHealthy, reason, message) + mvs.Reason = reason + mvs.Message = "Degraded: " + message +} + +// MarkPhase marks the phase with the given reason and message. +func (mvs *MonoVertexStatus) MarkPhase(phase MonoVertexPhase, reason, message string) { + mvs.Phase = phase + mvs.Reason = reason + mvs.Message = message +} + +// MarkPhaseFailed marks the phase as failed with the given reason and message. +func (mvs *MonoVertexStatus) MarkPhaseFailed(reason, message string) { + mvs.MarkPhase(MonoVertexPhaseFailed, reason, message) +} + +// MarkPhaseRunning marks the phase as running. 
+func (mvs *MonoVertexStatus) MarkPhaseRunning() { + mvs.MarkPhase(MonoVertexPhaseRunning, "", "") +} + +// +kubebuilder:object:root=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type MonoVertexList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []MonoVertex `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/pkg/apis/numaflow/v1alpha1/openapi_generated.go b/pkg/apis/numaflow/v1alpha1/openapi_generated.go index 672c0e1621..ac9d1140ef 100644 --- a/pkg/apis/numaflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/numaflow/v1alpha1/openapi_generated.go @@ -30,92 +30,99 @@ import ( func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { return map[string]common.OpenAPIDefinition{ - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.AbstractPodTemplate": schema_pkg_apis_numaflow_v1alpha1_AbstractPodTemplate(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.AbstractSink": schema_pkg_apis_numaflow_v1alpha1_AbstractSink(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.AbstractVertex": schema_pkg_apis_numaflow_v1alpha1_AbstractVertex(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Authorization": schema_pkg_apis_numaflow_v1alpha1_Authorization(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.BasicAuth": schema_pkg_apis_numaflow_v1alpha1_BasicAuth(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Blackhole": schema_pkg_apis_numaflow_v1alpha1_Blackhole(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.BufferServiceConfig": schema_pkg_apis_numaflow_v1alpha1_BufferServiceConfig(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.CombinedEdge": schema_pkg_apis_numaflow_v1alpha1_CombinedEdge(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Container": 
schema_pkg_apis_numaflow_v1alpha1_Container(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate": schema_pkg_apis_numaflow_v1alpha1_ContainerTemplate(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.DaemonTemplate": schema_pkg_apis_numaflow_v1alpha1_DaemonTemplate(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Edge": schema_pkg_apis_numaflow_v1alpha1_Edge(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.FixedWindow": schema_pkg_apis_numaflow_v1alpha1_FixedWindow(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ForwardConditions": schema_pkg_apis_numaflow_v1alpha1_ForwardConditions(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Function": schema_pkg_apis_numaflow_v1alpha1_Function(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.GSSAPI": schema_pkg_apis_numaflow_v1alpha1_GSSAPI(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.GeneratorSource": schema_pkg_apis_numaflow_v1alpha1_GeneratorSource(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.GetDaemonDeploymentReq": schema_pkg_apis_numaflow_v1alpha1_GetDaemonDeploymentReq(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.GetJetStreamServiceSpecReq": schema_pkg_apis_numaflow_v1alpha1_GetJetStreamServiceSpecReq(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.GetJetStreamStatefulSetSpecReq": schema_pkg_apis_numaflow_v1alpha1_GetJetStreamStatefulSetSpecReq(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.GetRedisServiceSpecReq": schema_pkg_apis_numaflow_v1alpha1_GetRedisServiceSpecReq(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.GetRedisStatefulSetSpecReq": schema_pkg_apis_numaflow_v1alpha1_GetRedisStatefulSetSpecReq(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.GetSideInputDeploymentReq": schema_pkg_apis_numaflow_v1alpha1_GetSideInputDeploymentReq(ref), - 
"github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.GetVertexPodSpecReq": schema_pkg_apis_numaflow_v1alpha1_GetVertexPodSpecReq(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.GroupBy": schema_pkg_apis_numaflow_v1alpha1_GroupBy(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.HTTPSource": schema_pkg_apis_numaflow_v1alpha1_HTTPSource(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.IdleSource": schema_pkg_apis_numaflow_v1alpha1_IdleSource(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.InterStepBufferService": schema_pkg_apis_numaflow_v1alpha1_InterStepBufferService(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.InterStepBufferServiceList": schema_pkg_apis_numaflow_v1alpha1_InterStepBufferServiceList(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.InterStepBufferServiceSpec": schema_pkg_apis_numaflow_v1alpha1_InterStepBufferServiceSpec(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.InterStepBufferServiceStatus": schema_pkg_apis_numaflow_v1alpha1_InterStepBufferServiceStatus(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.JetStreamBufferService": schema_pkg_apis_numaflow_v1alpha1_JetStreamBufferService(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.JetStreamConfig": schema_pkg_apis_numaflow_v1alpha1_JetStreamConfig(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.JetStreamSource": schema_pkg_apis_numaflow_v1alpha1_JetStreamSource(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.JobTemplate": schema_pkg_apis_numaflow_v1alpha1_JobTemplate(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.KafkaSink": schema_pkg_apis_numaflow_v1alpha1_KafkaSink(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.KafkaSource": schema_pkg_apis_numaflow_v1alpha1_KafkaSource(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Lifecycle": 
schema_pkg_apis_numaflow_v1alpha1_Lifecycle(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Log": schema_pkg_apis_numaflow_v1alpha1_Log(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata": schema_pkg_apis_numaflow_v1alpha1_Metadata(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.NativeRedis": schema_pkg_apis_numaflow_v1alpha1_NativeRedis(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.NatsAuth": schema_pkg_apis_numaflow_v1alpha1_NatsAuth(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.NatsSource": schema_pkg_apis_numaflow_v1alpha1_NatsSource(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.NoStore": schema_pkg_apis_numaflow_v1alpha1_NoStore(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.PBQStorage": schema_pkg_apis_numaflow_v1alpha1_PBQStorage(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.PersistenceStrategy": schema_pkg_apis_numaflow_v1alpha1_PersistenceStrategy(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Pipeline": schema_pkg_apis_numaflow_v1alpha1_Pipeline(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.PipelineLimits": schema_pkg_apis_numaflow_v1alpha1_PipelineLimits(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.PipelineList": schema_pkg_apis_numaflow_v1alpha1_PipelineList(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.PipelineSpec": schema_pkg_apis_numaflow_v1alpha1_PipelineSpec(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.PipelineStatus": schema_pkg_apis_numaflow_v1alpha1_PipelineStatus(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.RedisBufferService": schema_pkg_apis_numaflow_v1alpha1_RedisBufferService(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.RedisConfig": schema_pkg_apis_numaflow_v1alpha1_RedisConfig(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.RedisSettings": 
schema_pkg_apis_numaflow_v1alpha1_RedisSettings(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.SASL": schema_pkg_apis_numaflow_v1alpha1_SASL(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.SASLPlain": schema_pkg_apis_numaflow_v1alpha1_SASLPlain(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Scale": schema_pkg_apis_numaflow_v1alpha1_Scale(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ServingSource": schema_pkg_apis_numaflow_v1alpha1_ServingSource(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ServingStore": schema_pkg_apis_numaflow_v1alpha1_ServingStore(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.SessionWindow": schema_pkg_apis_numaflow_v1alpha1_SessionWindow(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.SideInput": schema_pkg_apis_numaflow_v1alpha1_SideInput(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.SideInputTrigger": schema_pkg_apis_numaflow_v1alpha1_SideInputTrigger(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.SideInputsManagerTemplate": schema_pkg_apis_numaflow_v1alpha1_SideInputsManagerTemplate(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Sink": schema_pkg_apis_numaflow_v1alpha1_Sink(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.SlidingWindow": schema_pkg_apis_numaflow_v1alpha1_SlidingWindow(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Source": schema_pkg_apis_numaflow_v1alpha1_Source(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Status": schema_pkg_apis_numaflow_v1alpha1_Status(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.TLS": schema_pkg_apis_numaflow_v1alpha1_TLS(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.TagConditions": schema_pkg_apis_numaflow_v1alpha1_TagConditions(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Templates": 
schema_pkg_apis_numaflow_v1alpha1_Templates(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Transformer": schema_pkg_apis_numaflow_v1alpha1_Transformer(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UDF": schema_pkg_apis_numaflow_v1alpha1_UDF(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UDSink": schema_pkg_apis_numaflow_v1alpha1_UDSink(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UDSource": schema_pkg_apis_numaflow_v1alpha1_UDSource(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UDTransformer": schema_pkg_apis_numaflow_v1alpha1_UDTransformer(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Vertex": schema_pkg_apis_numaflow_v1alpha1_Vertex(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.VertexInstance": schema_pkg_apis_numaflow_v1alpha1_VertexInstance(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.VertexLimits": schema_pkg_apis_numaflow_v1alpha1_VertexLimits(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.VertexList": schema_pkg_apis_numaflow_v1alpha1_VertexList(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.VertexSpec": schema_pkg_apis_numaflow_v1alpha1_VertexSpec(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.VertexStatus": schema_pkg_apis_numaflow_v1alpha1_VertexStatus(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.VertexTemplate": schema_pkg_apis_numaflow_v1alpha1_VertexTemplate(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Watermark": schema_pkg_apis_numaflow_v1alpha1_Watermark(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Window": schema_pkg_apis_numaflow_v1alpha1_Window(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.containerBuilder": schema_pkg_apis_numaflow_v1alpha1_containerBuilder(ref), - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.getContainerReq": 
schema_pkg_apis_numaflow_v1alpha1_getContainerReq(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.AbstractPodTemplate": schema_pkg_apis_numaflow_v1alpha1_AbstractPodTemplate(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.AbstractSink": schema_pkg_apis_numaflow_v1alpha1_AbstractSink(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.AbstractVertex": schema_pkg_apis_numaflow_v1alpha1_AbstractVertex(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Authorization": schema_pkg_apis_numaflow_v1alpha1_Authorization(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.BasicAuth": schema_pkg_apis_numaflow_v1alpha1_BasicAuth(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Blackhole": schema_pkg_apis_numaflow_v1alpha1_Blackhole(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.BufferServiceConfig": schema_pkg_apis_numaflow_v1alpha1_BufferServiceConfig(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.CombinedEdge": schema_pkg_apis_numaflow_v1alpha1_CombinedEdge(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Container": schema_pkg_apis_numaflow_v1alpha1_Container(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate": schema_pkg_apis_numaflow_v1alpha1_ContainerTemplate(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.DaemonTemplate": schema_pkg_apis_numaflow_v1alpha1_DaemonTemplate(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Edge": schema_pkg_apis_numaflow_v1alpha1_Edge(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.FixedWindow": schema_pkg_apis_numaflow_v1alpha1_FixedWindow(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ForwardConditions": schema_pkg_apis_numaflow_v1alpha1_ForwardConditions(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Function": schema_pkg_apis_numaflow_v1alpha1_Function(ref), + 
"github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.GSSAPI": schema_pkg_apis_numaflow_v1alpha1_GSSAPI(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.GeneratorSource": schema_pkg_apis_numaflow_v1alpha1_GeneratorSource(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.GetDaemonDeploymentReq": schema_pkg_apis_numaflow_v1alpha1_GetDaemonDeploymentReq(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.GetJetStreamServiceSpecReq": schema_pkg_apis_numaflow_v1alpha1_GetJetStreamServiceSpecReq(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.GetJetStreamStatefulSetSpecReq": schema_pkg_apis_numaflow_v1alpha1_GetJetStreamStatefulSetSpecReq(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.GetMonoVertexDaemonDeploymentReq": schema_pkg_apis_numaflow_v1alpha1_GetMonoVertexDaemonDeploymentReq(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.GetMonoVertexPodSpecReq": schema_pkg_apis_numaflow_v1alpha1_GetMonoVertexPodSpecReq(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.GetRedisServiceSpecReq": schema_pkg_apis_numaflow_v1alpha1_GetRedisServiceSpecReq(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.GetRedisStatefulSetSpecReq": schema_pkg_apis_numaflow_v1alpha1_GetRedisStatefulSetSpecReq(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.GetSideInputDeploymentReq": schema_pkg_apis_numaflow_v1alpha1_GetSideInputDeploymentReq(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.GetVertexPodSpecReq": schema_pkg_apis_numaflow_v1alpha1_GetVertexPodSpecReq(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.GroupBy": schema_pkg_apis_numaflow_v1alpha1_GroupBy(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.HTTPSource": schema_pkg_apis_numaflow_v1alpha1_HTTPSource(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.IdleSource": schema_pkg_apis_numaflow_v1alpha1_IdleSource(ref), + 
"github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.InterStepBufferService": schema_pkg_apis_numaflow_v1alpha1_InterStepBufferService(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.InterStepBufferServiceList": schema_pkg_apis_numaflow_v1alpha1_InterStepBufferServiceList(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.InterStepBufferServiceSpec": schema_pkg_apis_numaflow_v1alpha1_InterStepBufferServiceSpec(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.InterStepBufferServiceStatus": schema_pkg_apis_numaflow_v1alpha1_InterStepBufferServiceStatus(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.JetStreamBufferService": schema_pkg_apis_numaflow_v1alpha1_JetStreamBufferService(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.JetStreamConfig": schema_pkg_apis_numaflow_v1alpha1_JetStreamConfig(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.JetStreamSource": schema_pkg_apis_numaflow_v1alpha1_JetStreamSource(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.JobTemplate": schema_pkg_apis_numaflow_v1alpha1_JobTemplate(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.KafkaSink": schema_pkg_apis_numaflow_v1alpha1_KafkaSink(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.KafkaSource": schema_pkg_apis_numaflow_v1alpha1_KafkaSource(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Lifecycle": schema_pkg_apis_numaflow_v1alpha1_Lifecycle(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Log": schema_pkg_apis_numaflow_v1alpha1_Log(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata": schema_pkg_apis_numaflow_v1alpha1_Metadata(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.MonoVertex": schema_pkg_apis_numaflow_v1alpha1_MonoVertex(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.MonoVertexLimits": 
schema_pkg_apis_numaflow_v1alpha1_MonoVertexLimits(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.MonoVertexList": schema_pkg_apis_numaflow_v1alpha1_MonoVertexList(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.MonoVertexSpec": schema_pkg_apis_numaflow_v1alpha1_MonoVertexSpec(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.MonoVertexStatus": schema_pkg_apis_numaflow_v1alpha1_MonoVertexStatus(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.NativeRedis": schema_pkg_apis_numaflow_v1alpha1_NativeRedis(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.NatsAuth": schema_pkg_apis_numaflow_v1alpha1_NatsAuth(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.NatsSource": schema_pkg_apis_numaflow_v1alpha1_NatsSource(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.NoStore": schema_pkg_apis_numaflow_v1alpha1_NoStore(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.PBQStorage": schema_pkg_apis_numaflow_v1alpha1_PBQStorage(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.PersistenceStrategy": schema_pkg_apis_numaflow_v1alpha1_PersistenceStrategy(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Pipeline": schema_pkg_apis_numaflow_v1alpha1_Pipeline(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.PipelineLimits": schema_pkg_apis_numaflow_v1alpha1_PipelineLimits(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.PipelineList": schema_pkg_apis_numaflow_v1alpha1_PipelineList(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.PipelineSpec": schema_pkg_apis_numaflow_v1alpha1_PipelineSpec(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.PipelineStatus": schema_pkg_apis_numaflow_v1alpha1_PipelineStatus(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.RedisBufferService": schema_pkg_apis_numaflow_v1alpha1_RedisBufferService(ref), + 
"github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.RedisConfig": schema_pkg_apis_numaflow_v1alpha1_RedisConfig(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.RedisSettings": schema_pkg_apis_numaflow_v1alpha1_RedisSettings(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.SASL": schema_pkg_apis_numaflow_v1alpha1_SASL(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.SASLPlain": schema_pkg_apis_numaflow_v1alpha1_SASLPlain(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Scale": schema_pkg_apis_numaflow_v1alpha1_Scale(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ServingSource": schema_pkg_apis_numaflow_v1alpha1_ServingSource(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ServingStore": schema_pkg_apis_numaflow_v1alpha1_ServingStore(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.SessionWindow": schema_pkg_apis_numaflow_v1alpha1_SessionWindow(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.SideInput": schema_pkg_apis_numaflow_v1alpha1_SideInput(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.SideInputTrigger": schema_pkg_apis_numaflow_v1alpha1_SideInputTrigger(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.SideInputsManagerTemplate": schema_pkg_apis_numaflow_v1alpha1_SideInputsManagerTemplate(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Sink": schema_pkg_apis_numaflow_v1alpha1_Sink(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.SlidingWindow": schema_pkg_apis_numaflow_v1alpha1_SlidingWindow(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Source": schema_pkg_apis_numaflow_v1alpha1_Source(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Status": schema_pkg_apis_numaflow_v1alpha1_Status(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.TLS": schema_pkg_apis_numaflow_v1alpha1_TLS(ref), + 
"github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.TagConditions": schema_pkg_apis_numaflow_v1alpha1_TagConditions(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Templates": schema_pkg_apis_numaflow_v1alpha1_Templates(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Transformer": schema_pkg_apis_numaflow_v1alpha1_Transformer(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UDF": schema_pkg_apis_numaflow_v1alpha1_UDF(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UDSink": schema_pkg_apis_numaflow_v1alpha1_UDSink(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UDSource": schema_pkg_apis_numaflow_v1alpha1_UDSource(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UDTransformer": schema_pkg_apis_numaflow_v1alpha1_UDTransformer(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Vertex": schema_pkg_apis_numaflow_v1alpha1_Vertex(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.VertexInstance": schema_pkg_apis_numaflow_v1alpha1_VertexInstance(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.VertexLimits": schema_pkg_apis_numaflow_v1alpha1_VertexLimits(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.VertexList": schema_pkg_apis_numaflow_v1alpha1_VertexList(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.VertexSpec": schema_pkg_apis_numaflow_v1alpha1_VertexSpec(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.VertexStatus": schema_pkg_apis_numaflow_v1alpha1_VertexStatus(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.VertexTemplate": schema_pkg_apis_numaflow_v1alpha1_VertexTemplate(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Watermark": schema_pkg_apis_numaflow_v1alpha1_Watermark(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Window": schema_pkg_apis_numaflow_v1alpha1_Window(ref), + 
"github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.containerBuilder": schema_pkg_apis_numaflow_v1alpha1_containerBuilder(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.getContainerReq": schema_pkg_apis_numaflow_v1alpha1_getContainerReq(ref), } } @@ -1516,6 +1523,102 @@ func schema_pkg_apis_numaflow_v1alpha1_GetJetStreamStatefulSetSpecReq(ref common } } +func schema_pkg_apis_numaflow_v1alpha1_GetMonoVertexDaemonDeploymentReq(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "Image": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "PullPolicy": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "Env": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvVar"), + }, + }, + }, + }, + }, + "DefaultResources": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, + }, + Required: []string{"Image", "PullPolicy", "Env", "DefaultResources"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.ResourceRequirements"}, + } +} + +func schema_pkg_apis_numaflow_v1alpha1_GetMonoVertexPodSpecReq(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "Image": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "PullPolicy": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, 
+ Format: "", + }, + }, + "Env": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvVar"), + }, + }, + }, + }, + }, + "DefaultResources": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, + }, + Required: []string{"Image", "PullPolicy", "Env", "DefaultResources"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.ResourceRequirements"}, + } +} + func schema_pkg_apis_numaflow_v1alpha1_GetRedisServiceSpecReq(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -2747,6 +2850,428 @@ func schema_pkg_apis_numaflow_v1alpha1_Metadata(ref common.ReferenceCallback) co } } +func schema_pkg_apis_numaflow_v1alpha1_MonoVertex(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.MonoVertexSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.MonoVertexStatus"), + }, + }, + }, + Required: []string{"spec"}, + }, + }, + Dependencies: []string{ + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.MonoVertexSpec", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.MonoVertexStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_pkg_apis_numaflow_v1alpha1_MonoVertexLimits(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "readBatchSize": { + SchemaProps: spec.SchemaProps{ + Description: "Read batch size from the source.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "readTimeout": { + SchemaProps: spec.SchemaProps{ + Description: "Read timeout duration from the source.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, + } +} + +func schema_pkg_apis_numaflow_v1alpha1_MonoVertexList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + 
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.MonoVertex"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.MonoVertex", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_pkg_apis_numaflow_v1alpha1_MonoVertexSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "replicas": { + SchemaProps: spec.SchemaProps{ + Type: []string{"integer"}, + Format: "int32", + }, + }, + "source": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Source"), + }, + }, + "sink": { + SchemaProps: spec.SchemaProps{ + Ref: 
ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Sink"), + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Metadata sets the pods's metadata, i.e. annotations and labels", + Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata"), + }, + }, + "nodeSelector": { + SchemaProps: spec.SchemaProps{ + Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "tolerations": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod's tolerations.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.Toleration"), + }, + }, + }, + }, + }, + "securityContext": { + SchemaProps: spec.SchemaProps{ + Description: "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", + Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), + }, + }, + "imagePullSecrets": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. 
For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + }, + }, + }, + }, + }, + "priorityClassName": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", + Type: []string{"string"}, + Format: "", + }, + }, + "priority": { + SchemaProps: spec.SchemaProps{ + Description: "The priority value. Various system components use this field to find the priority of the Redis pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. 
More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "affinity": { + SchemaProps: spec.SchemaProps{ + Description: "The pod's scheduling constraints More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/", + Ref: ref("k8s.io/api/core/v1.Affinity"), + }, + }, + "serviceAccountName": { + SchemaProps: spec.SchemaProps{ + Description: "ServiceAccountName applied to the pod", + Type: []string{"string"}, + Format: "", + }, + }, + "runtimeClassName": { + SchemaProps: spec.SchemaProps{ + Description: "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", + Type: []string{"string"}, + Format: "", + }, + }, + "automountServiceAccountToken": { + SchemaProps: spec.SchemaProps{ + Description: "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "dnsPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", + Type: []string{"string"}, + Format: "", + }, + }, + "dnsConfig": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the DNS parameters of a pod. 
Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", + Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), + }, + }, + "containerTemplate": { + SchemaProps: spec.SchemaProps{ + Description: "Container template for the main numa container.", + Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate"), + }, + }, + "volumes": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.Volume"), + }, + }, + }, + }, + }, + "limits": { + SchemaProps: spec.SchemaProps{ + Description: "Limits define the limitations such as buffer read batch size for all the vertices of a pipeline, will override pipeline level settings", + Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.MonoVertexLimits"), + }, + }, + "scale": { + SchemaProps: spec.SchemaProps{ + Description: "Settings for autoscaling", + Default: map[string]interface{}{}, + Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Scale"), + }, + }, + "initContainers": { + SchemaProps: spec.SchemaProps{ + Description: "List of customized init containers belonging to the pod. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.Container"), + }, + }, + }, + }, + }, + "sidecars": { + SchemaProps: spec.SchemaProps{ + Description: "List of customized sidecar containers belonging to the pod.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.Container"), + }, + }, + }, + }, + }, + "daemonTemplate": { + SchemaProps: spec.SchemaProps{ + Description: "Template for the daemon service deployment.", + Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.DaemonTemplate"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.DaemonTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.MonoVertexLimits", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Scale", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Sink", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Source", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume"}, + } +} + +func schema_pkg_apis_numaflow_v1alpha1_MonoVertexStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "conditions": { + VendorExtensible: spec.VendorExtensible{ + Extensions: 
spec.Extensions{ + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Conditions are the latest available observations of a resource's current state.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Condition"), + }, + }, + }, + }, + }, + "phase": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "replicas": { + SchemaProps: spec.SchemaProps{ + Default: 0, + Type: []string{"integer"}, + Format: "int64", + }, + }, + "selector": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "reason": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "lastUpdated": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "lastScaledAt": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "observedGeneration": { + SchemaProps: spec.SchemaProps{ + Type: []string{"integer"}, + Format: "int64", + }, + }, + }, + Required: []string{"replicas"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Condition", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + } +} + func schema_pkg_apis_numaflow_v1alpha1_NativeRedis(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/pkg/apis/numaflow/v1alpha1/pipeline_types.go b/pkg/apis/numaflow/v1alpha1/pipeline_types.go index a43ba5bd04..b6fe89e85a 100644 --- a/pkg/apis/numaflow/v1alpha1/pipeline_types.go +++ b/pkg/apis/numaflow/v1alpha1/pipeline_types.go @@ -392,7 +392,7 @@ func (p Pipeline) GetPipelineLimits() 
PipelineLimits { defaultReadBatchSize := uint64(DefaultReadBatchSize) defaultBufferMaxLength := uint64(DefaultBufferLength) defaultBufferUsageLimit := uint32(100 * DefaultBufferUsageLimit) - defaultReadTimeout := time.Second + defaultReadTimeout := DefaultReadTimeout limits := PipelineLimits{ ReadBatchSize: &defaultReadBatchSize, BufferMaxLength: &defaultBufferMaxLength, diff --git a/pkg/apis/numaflow/v1alpha1/register.go b/pkg/apis/numaflow/v1alpha1/register.go index 84a76ecff2..8dafa02c74 100644 --- a/pkg/apis/numaflow/v1alpha1/register.go +++ b/pkg/apis/numaflow/v1alpha1/register.go @@ -32,12 +32,14 @@ var ( // AddToScheme adds the types in this group-version to the given scheme. AddToScheme = SchemeBuilder.AddToScheme - ISBGroupVersionKind = SchemeGroupVersion.WithKind("InterStepBufferService") - ISBGroupVersionResource = SchemeGroupVersion.WithResource("interstepbufferservices") - PipelineGroupVersionKind = SchemeGroupVersion.WithKind("Pipeline") - PipelineGroupVersionResource = SchemeGroupVersion.WithResource("pipelines") - VertexGroupVersionKind = SchemeGroupVersion.WithKind("Vertex") - VertexGroupVersionResource = SchemeGroupVersion.WithResource("vertices") + ISBGroupVersionKind = SchemeGroupVersion.WithKind("InterStepBufferService") + ISBGroupVersionResource = SchemeGroupVersion.WithResource("interstepbufferservices") + PipelineGroupVersionKind = SchemeGroupVersion.WithKind("Pipeline") + PipelineGroupVersionResource = SchemeGroupVersion.WithResource("pipelines") + VertexGroupVersionKind = SchemeGroupVersion.WithKind("Vertex") + VertexGroupVersionResource = SchemeGroupVersion.WithResource("vertices") + MonoVertexGroupVersionKind = SchemeGroupVersion.WithKind("MonoVertex") + MonoVertexGroupVersionResource = SchemeGroupVersion.WithResource("monovertices") ) // Resource takes an unqualified resource and returns a Group qualified GroupResource @@ -53,6 +55,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &PipelineList{}, &Vertex{}, &VertexList{}, + 
&MonoVertex{}, + &MonoVertexList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/pkg/apis/numaflow/v1alpha1/scale.go b/pkg/apis/numaflow/v1alpha1/scale.go new file mode 100644 index 0000000000..c775468b17 --- /dev/null +++ b/pkg/apis/numaflow/v1alpha1/scale.go @@ -0,0 +1,134 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// Scale defines the parameters for autoscaling. +type Scale struct { + // Whether to disable autoscaling. + // Set to "true" when using Kubernetes HPA or any other 3rd party autoscaling strategies. + // +optional + Disabled bool `json:"disabled,omitempty" protobuf:"bytes,1,opt,name=disabled"` + // Minimum replicas. + // +optional + Min *int32 `json:"min,omitempty" protobuf:"varint,2,opt,name=min"` + // Maximum replicas. + // +optional + Max *int32 `json:"max,omitempty" protobuf:"varint,3,opt,name=max"` + // Lookback seconds to calculate the average pending messages and processing rate. + // +optional + LookbackSeconds *uint32 `json:"lookbackSeconds,omitempty" protobuf:"varint,4,opt,name=lookbackSeconds"` + // Deprecated: Use scaleUpCooldownSeconds and scaleDownCooldownSeconds instead. + // Cooldown seconds after a scaling operation before another one. 
+ // +optional + DeprecatedCooldownSeconds *uint32 `json:"cooldownSeconds,omitempty" protobuf:"varint,5,opt,name=cooldownSeconds"` + // After scaling down the source vertex to 0, sleep how many seconds before scaling the source vertex back up to peek. + // +optional + ZeroReplicaSleepSeconds *uint32 `json:"zeroReplicaSleepSeconds,omitempty" protobuf:"varint,6,opt,name=zeroReplicaSleepSeconds"` + // TargetProcessingSeconds is used to tune the aggressiveness of autoscaling for source vertices, it measures how fast + // you want the vertex to process all the pending messages. Typically increasing the value, which leads to lower processing + // rate, thus less replicas. It's only effective for source vertices. + // +optional + TargetProcessingSeconds *uint32 `json:"targetProcessingSeconds,omitempty" protobuf:"varint,7,opt,name=targetProcessingSeconds"` + // TargetBufferAvailability is used to define the target percentage of the buffer availability. + // A valid and meaningful value should be less than the BufferUsageLimit defined in the Edge spec (or Pipeline spec), for example, 50. + // It only applies to UDF and Sink vertices because only they have buffers to read. + // +optional + TargetBufferAvailability *uint32 `json:"targetBufferAvailability,omitempty" protobuf:"varint,8,opt,name=targetBufferAvailability"` + // ReplicasPerScale defines maximum replicas can be scaled up or down at once. + // The is use to prevent too aggressive scaling operations + // +optional + ReplicasPerScale *uint32 `json:"replicasPerScale,omitempty" protobuf:"varint,9,opt,name=replicasPerScale"` + // ScaleUpCooldownSeconds defines the cooldown seconds after a scaling operation, before a follow-up scaling up. + // It defaults to the CooldownSeconds if not set. 
+ // +optional + ScaleUpCooldownSeconds *uint32 `json:"scaleUpCooldownSeconds,omitempty" protobuf:"varint,10,opt,name=scaleUpCooldownSeconds"` + // ScaleDownCooldownSeconds defines the cooldown seconds after a scaling operation, before a follow-up scaling down. + // It defaults to the CooldownSeconds if not set. + // +optional + ScaleDownCooldownSeconds *uint32 `json:"scaleDownCooldownSeconds,omitempty" protobuf:"varint,11,opt,name=scaleDownCooldownSeconds"` +} + +func (s Scale) GetLookbackSeconds() int { + if s.LookbackSeconds != nil { + return int(*s.LookbackSeconds) + } + return DefaultLookbackSeconds +} + +func (s Scale) GetScaleUpCooldownSeconds() int { + if s.ScaleUpCooldownSeconds != nil { + return int(*s.ScaleUpCooldownSeconds) + } + if s.DeprecatedCooldownSeconds != nil { + return int(*s.DeprecatedCooldownSeconds) + } + return DefaultCooldownSeconds +} + +func (s Scale) GetScaleDownCooldownSeconds() int { + if s.ScaleDownCooldownSeconds != nil { + return int(*s.ScaleDownCooldownSeconds) + } + if s.DeprecatedCooldownSeconds != nil { + return int(*s.DeprecatedCooldownSeconds) + } + return DefaultCooldownSeconds +} + +func (s Scale) GetZeroReplicaSleepSeconds() int { + if s.ZeroReplicaSleepSeconds != nil { + return int(*s.ZeroReplicaSleepSeconds) + } + return DefaultZeroReplicaSleepSeconds +} + +func (s Scale) GetTargetProcessingSeconds() int { + if s.TargetProcessingSeconds != nil { + return int(*s.TargetProcessingSeconds) + } + return DefaultTargetProcessingSeconds +} + +func (s Scale) GetTargetBufferAvailability() int { + if s.TargetBufferAvailability != nil { + return int(*s.TargetBufferAvailability) + } + return DefaultTargetBufferAvailability +} + +func (s Scale) GetReplicasPerScale() int { + if s.ReplicasPerScale != nil { + return int(*s.ReplicasPerScale) + } + return DefaultReplicasPerScale +} + +func (s Scale) GetMinReplicas() int32 { + if x := s.Min; x == nil || *x < 0 { + return 0 + } else { + return *x + } +} + +func (s Scale) GetMaxReplicas() 
int32 { + if x := s.Max; x == nil { + return DefaultMaxReplicas + } else { + return *x + } +} diff --git a/pkg/apis/numaflow/v1alpha1/scale_test.go b/pkg/apis/numaflow/v1alpha1/scale_test.go new file mode 100644 index 0000000000..9bb09399eb --- /dev/null +++ b/pkg/apis/numaflow/v1alpha1/scale_test.go @@ -0,0 +1,66 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "k8s.io/utils/ptr" +) + +func Test_Scale_Parameters(t *testing.T) { + s := Scale{} + assert.Equal(t, int32(0), s.GetMinReplicas()) + assert.Equal(t, int32(DefaultMaxReplicas), s.GetMaxReplicas()) + assert.Equal(t, DefaultCooldownSeconds, s.GetScaleUpCooldownSeconds()) + assert.Equal(t, DefaultCooldownSeconds, s.GetScaleDownCooldownSeconds()) + assert.Equal(t, DefaultLookbackSeconds, s.GetLookbackSeconds()) + assert.Equal(t, DefaultReplicasPerScale, s.GetReplicasPerScale()) + assert.Equal(t, DefaultTargetBufferAvailability, s.GetTargetBufferAvailability()) + assert.Equal(t, DefaultTargetProcessingSeconds, s.GetTargetProcessingSeconds()) + assert.Equal(t, DefaultZeroReplicaSleepSeconds, s.GetZeroReplicaSleepSeconds()) + upcds := uint32(100) + downcds := uint32(99) + lbs := uint32(101) + rps := uint32(3) + tps := uint32(102) + tbu := uint32(33) + zrss := uint32(44) + s = Scale{ + Min: ptr.To[int32](2), + Max: ptr.To[int32](4), + ScaleUpCooldownSeconds: &upcds, + ScaleDownCooldownSeconds: &downcds, + 
LookbackSeconds: &lbs, + ReplicasPerScale: &rps, + TargetProcessingSeconds: &tps, + TargetBufferAvailability: &tbu, + ZeroReplicaSleepSeconds: &zrss, + } + assert.Equal(t, int32(2), s.GetMinReplicas()) + assert.Equal(t, int32(4), s.GetMaxReplicas()) + assert.Equal(t, int(upcds), s.GetScaleUpCooldownSeconds()) + assert.Equal(t, int(downcds), s.GetScaleDownCooldownSeconds()) + assert.Equal(t, int(lbs), s.GetLookbackSeconds()) + assert.Equal(t, int(rps), s.GetReplicasPerScale()) + assert.Equal(t, int(tbu), s.GetTargetBufferAvailability()) + assert.Equal(t, int(tps), s.GetTargetProcessingSeconds()) + assert.Equal(t, int(zrss), s.GetZeroReplicaSleepSeconds()) + s.Max = ptr.To[int32](500) + assert.Equal(t, int32(500), s.GetMaxReplicas()) +} diff --git a/pkg/apis/numaflow/v1alpha1/vertex_types.go b/pkg/apis/numaflow/v1alpha1/vertex_types.go index 499924cf7e..c93af36a98 100644 --- a/pkg/apis/numaflow/v1alpha1/vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/vertex_types.go @@ -26,6 +26,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/ptr" ) // +kubebuilder:validation:Enum="";Running;Failed @@ -213,7 +214,7 @@ func (v Vertex) GetPodSpec(req GetVertexPodSpecReq) (*corev1.PodSpec, error) { Namespace: v.Namespace, Name: v.Name, }, - Spec: v.Spec.WithOutReplicas(), + Spec: v.Spec.DeepCopyWithoutReplicas(), } vertexBytes, err := json.Marshal(vertexCopy) if err != nil { @@ -479,10 +480,9 @@ func (v Vertex) getInitContainers(req GetVertexPodSpecReq) []corev1.Container { return append(initContainers, v.Spec.InitContainers...) 
} -func (vs VertexSpec) WithOutReplicas() VertexSpec { - zero := int32(0) +func (vs VertexSpec) DeepCopyWithoutReplicas() VertexSpec { x := *vs.DeepCopy() - x.Replicas = &zero + x.Replicas = ptr.To[int32](0) return x } @@ -675,123 +675,6 @@ func (av AbstractVertex) OwnedBufferNames(namespace, pipeline string) []string { return r } -// Scale defines the parameters for autoscaling. -type Scale struct { - // Whether to disable autoscaling. - // Set to "true" when using Kubernetes HPA or any other 3rd party autoscaling strategies. - // +optional - Disabled bool `json:"disabled,omitempty" protobuf:"bytes,1,opt,name=disabled"` - // Minimum replicas. - // +optional - Min *int32 `json:"min,omitempty" protobuf:"varint,2,opt,name=min"` - // Maximum replicas. - // +optional - Max *int32 `json:"max,omitempty" protobuf:"varint,3,opt,name=max"` - // Lookback seconds to calculate the average pending messages and processing rate. - // +optional - LookbackSeconds *uint32 `json:"lookbackSeconds,omitempty" protobuf:"varint,4,opt,name=lookbackSeconds"` - // Deprecated: Use scaleUpCooldownSeconds and scaleDownCooldownSeconds instead. - // Cooldown seconds after a scaling operation before another one. - // +optional - DeprecatedCooldownSeconds *uint32 `json:"cooldownSeconds,omitempty" protobuf:"varint,5,opt,name=cooldownSeconds"` - // After scaling down the source vertex to 0, sleep how many seconds before scaling the source vertex back up to peek. - // +optional - ZeroReplicaSleepSeconds *uint32 `json:"zeroReplicaSleepSeconds,omitempty" protobuf:"varint,6,opt,name=zeroReplicaSleepSeconds"` - // TargetProcessingSeconds is used to tune the aggressiveness of autoscaling for source vertices, it measures how fast - // you want the vertex to process all the pending messages. Typically increasing the value, which leads to lower processing - // rate, thus less replicas. It's only effective for source vertices. 
- // +optional - TargetProcessingSeconds *uint32 `json:"targetProcessingSeconds,omitempty" protobuf:"varint,7,opt,name=targetProcessingSeconds"` - // TargetBufferAvailability is used to define the target percentage of the buffer availability. - // A valid and meaningful value should be less than the BufferUsageLimit defined in the Edge spec (or Pipeline spec), for example, 50. - // It only applies to UDF and Sink vertices because only they have buffers to read. - // +optional - TargetBufferAvailability *uint32 `json:"targetBufferAvailability,omitempty" protobuf:"varint,8,opt,name=targetBufferAvailability"` - // ReplicasPerScale defines maximum replicas can be scaled up or down at once. - // The is use to prevent too aggressive scaling operations - // +optional - ReplicasPerScale *uint32 `json:"replicasPerScale,omitempty" protobuf:"varint,9,opt,name=replicasPerScale"` - // ScaleUpCooldownSeconds defines the cooldown seconds after a scaling operation, before a follow-up scaling up. - // It defaults to the CooldownSeconds if not set. - // +optional - ScaleUpCooldownSeconds *uint32 `json:"scaleUpCooldownSeconds,omitempty" protobuf:"varint,10,opt,name=scaleUpCooldownSeconds"` - // ScaleDownCooldownSeconds defines the cooldown seconds after a scaling operation, before a follow-up scaling down. - // It defaults to the CooldownSeconds if not set. 
- // +optional - ScaleDownCooldownSeconds *uint32 `json:"scaleDownCooldownSeconds,omitempty" protobuf:"varint,11,opt,name=scaleDownCooldownSeconds"` -} - -func (s Scale) GetLookbackSeconds() int { - if s.LookbackSeconds != nil { - return int(*s.LookbackSeconds) - } - return DefaultLookbackSeconds -} - -func (s Scale) GetScaleUpCooldownSeconds() int { - if s.ScaleUpCooldownSeconds != nil { - return int(*s.ScaleUpCooldownSeconds) - } - if s.DeprecatedCooldownSeconds != nil { - return int(*s.DeprecatedCooldownSeconds) - } - return DefaultCooldownSeconds -} - -func (s Scale) GetScaleDownCooldownSeconds() int { - if s.ScaleDownCooldownSeconds != nil { - return int(*s.ScaleDownCooldownSeconds) - } - if s.DeprecatedCooldownSeconds != nil { - return int(*s.DeprecatedCooldownSeconds) - } - return DefaultCooldownSeconds -} - -func (s Scale) GetZeroReplicaSleepSeconds() int { - if s.ZeroReplicaSleepSeconds != nil { - return int(*s.ZeroReplicaSleepSeconds) - } - return DefaultZeroReplicaSleepSeconds -} - -func (s Scale) GetTargetProcessingSeconds() int { - if s.TargetProcessingSeconds != nil { - return int(*s.TargetProcessingSeconds) - } - return DefaultTargetProcessingSeconds -} - -func (s Scale) GetTargetBufferAvailability() int { - if s.TargetBufferAvailability != nil { - return int(*s.TargetBufferAvailability) - } - return DefaultTargetBufferAvailability -} - -func (s Scale) GetReplicasPerScale() int { - if s.ReplicasPerScale != nil { - return int(*s.ReplicasPerScale) - } - return DefaultReplicasPerScale -} - -func (s Scale) GetMinReplicas() int32 { - if x := s.Min; x == nil || *x < 0 { - return 0 - } else { - return *x - } -} - -func (s Scale) GetMaxReplicas() int32 { - if x := s.Max; x == nil { - return DefaultMaxReplicas - } else { - return *x - } -} - type VertexLimits struct { // Read batch size from the source or buffer. // It overrides the settings from pipeline limits. 
diff --git a/pkg/apis/numaflow/v1alpha1/vertex_types_test.go b/pkg/apis/numaflow/v1alpha1/vertex_types_test.go index d0a3134ba7..a664fee1d0 100644 --- a/pkg/apis/numaflow/v1alpha1/vertex_types_test.go +++ b/pkg/apis/numaflow/v1alpha1/vertex_types_test.go @@ -121,7 +121,7 @@ func TestWithoutReplicas(t *testing.T) { s := &VertexSpec{ Replicas: ptr.To[int32](3), } - assert.Equal(t, int32(0), *s.WithOutReplicas().Replicas) + assert.Equal(t, int32(0), *s.DeepCopyWithoutReplicas().Replicas) } func TestGetVertexReplicas(t *testing.T) { @@ -648,47 +648,6 @@ func TestScalable(t *testing.T) { assert.True(t, v.Scalable()) } -func Test_Scale_Parameters(t *testing.T) { - s := Scale{} - assert.Equal(t, int32(0), s.GetMinReplicas()) - assert.Equal(t, int32(DefaultMaxReplicas), s.GetMaxReplicas()) - assert.Equal(t, DefaultCooldownSeconds, s.GetScaleUpCooldownSeconds()) - assert.Equal(t, DefaultCooldownSeconds, s.GetScaleDownCooldownSeconds()) - assert.Equal(t, DefaultLookbackSeconds, s.GetLookbackSeconds()) - assert.Equal(t, DefaultReplicasPerScale, s.GetReplicasPerScale()) - assert.Equal(t, DefaultTargetBufferAvailability, s.GetTargetBufferAvailability()) - assert.Equal(t, DefaultTargetProcessingSeconds, s.GetTargetProcessingSeconds()) - assert.Equal(t, DefaultZeroReplicaSleepSeconds, s.GetZeroReplicaSleepSeconds()) - upcds := uint32(100) - downcds := uint32(99) - lbs := uint32(101) - rps := uint32(3) - tps := uint32(102) - tbu := uint32(33) - zrss := uint32(44) - s = Scale{ - Min: ptr.To[int32](2), - Max: ptr.To[int32](4), - ScaleUpCooldownSeconds: &upcds, - ScaleDownCooldownSeconds: &downcds, - LookbackSeconds: &lbs, - ReplicasPerScale: &rps, - TargetProcessingSeconds: &tps, - TargetBufferAvailability: &tbu, - ZeroReplicaSleepSeconds: &zrss, - } - assert.Equal(t, int32(2), s.GetMinReplicas()) - assert.Equal(t, int32(4), s.GetMaxReplicas()) - assert.Equal(t, int(upcds), s.GetScaleUpCooldownSeconds()) - assert.Equal(t, int(downcds), s.GetScaleDownCooldownSeconds()) - 
assert.Equal(t, int(lbs), s.GetLookbackSeconds()) - assert.Equal(t, int(rps), s.GetReplicasPerScale()) - assert.Equal(t, int(tbu), s.GetTargetBufferAvailability()) - assert.Equal(t, int(tps), s.GetTargetProcessingSeconds()) - assert.Equal(t, int(zrss), s.GetZeroReplicaSleepSeconds()) - s.Max = ptr.To[int32](500) - assert.Equal(t, int32(500), s.GetMaxReplicas()) -} func Test_GetVertexType(t *testing.T) { t.Run("source vertex", func(t *testing.T) { v := Vertex{ diff --git a/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go index bbf9bc3867..475ef0ea31 100644 --- a/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go @@ -728,6 +728,54 @@ func (in *GetJetStreamStatefulSetSpecReq) DeepCopy() *GetJetStreamStatefulSetSpe return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GetMonoVertexDaemonDeploymentReq) DeepCopyInto(out *GetMonoVertexDaemonDeploymentReq) { + *out = *in + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]v1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.DefaultResources.DeepCopyInto(&out.DefaultResources) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GetMonoVertexDaemonDeploymentReq. +func (in *GetMonoVertexDaemonDeploymentReq) DeepCopy() *GetMonoVertexDaemonDeploymentReq { + if in == nil { + return nil + } + out := new(GetMonoVertexDaemonDeploymentReq) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GetMonoVertexPodSpecReq) DeepCopyInto(out *GetMonoVertexPodSpecReq) { + *out = *in + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]v1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.DefaultResources.DeepCopyInto(&out.DefaultResources) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GetMonoVertexPodSpecReq. +func (in *GetMonoVertexPodSpecReq) DeepCopy() *GetMonoVertexPodSpecReq { + if in == nil { + return nil + } + out := new(GetMonoVertexPodSpecReq) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GetRedisServiceSpecReq) DeepCopyInto(out *GetRedisServiceSpecReq) { *out = *in @@ -1278,6 +1326,181 @@ func (in *Metadata) DeepCopy() *Metadata { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonoVertex) DeepCopyInto(out *MonoVertex) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonoVertex. +func (in *MonoVertex) DeepCopy() *MonoVertex { + if in == nil { + return nil + } + out := new(MonoVertex) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonoVertex) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonoVertexLimits) DeepCopyInto(out *MonoVertexLimits) { + *out = *in + if in.ReadBatchSize != nil { + in, out := &in.ReadBatchSize, &out.ReadBatchSize + *out = new(uint64) + **out = **in + } + if in.ReadTimeout != nil { + in, out := &in.ReadTimeout, &out.ReadTimeout + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonoVertexLimits. +func (in *MonoVertexLimits) DeepCopy() *MonoVertexLimits { + if in == nil { + return nil + } + out := new(MonoVertexLimits) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonoVertexList) DeepCopyInto(out *MonoVertexList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MonoVertex, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonoVertexList. +func (in *MonoVertexList) DeepCopy() *MonoVertexList { + if in == nil { + return nil + } + out := new(MonoVertexList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonoVertexList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonoVertexSpec) DeepCopyInto(out *MonoVertexSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(Source) + (*in).DeepCopyInto(*out) + } + if in.Sink != nil { + in, out := &in.Sink, &out.Sink + *out = new(Sink) + (*in).DeepCopyInto(*out) + } + in.AbstractPodTemplate.DeepCopyInto(&out.AbstractPodTemplate) + if in.ContainerTemplate != nil { + in, out := &in.ContainerTemplate, &out.ContainerTemplate + *out = new(ContainerTemplate) + (*in).DeepCopyInto(*out) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = new(MonoVertexLimits) + (*in).DeepCopyInto(*out) + } + in.Scale.DeepCopyInto(&out.Scale) + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]v1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Sidecars != nil { + in, out := &in.Sidecars, &out.Sidecars + *out = make([]v1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DaemonTemplate != nil { + in, out := &in.DaemonTemplate, &out.DaemonTemplate + *out = new(DaemonTemplate) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonoVertexSpec. +func (in *MonoVertexSpec) DeepCopy() *MonoVertexSpec { + if in == nil { + return nil + } + out := new(MonoVertexSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonoVertexStatus) DeepCopyInto(out *MonoVertexStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + in.LastUpdated.DeepCopyInto(&out.LastUpdated) + in.LastScaledAt.DeepCopyInto(&out.LastScaledAt) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonoVertexStatus. +func (in *MonoVertexStatus) DeepCopy() *MonoVertexStatus { + if in == nil { + return nil + } + out := new(MonoVertexStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NativeRedis) DeepCopyInto(out *NativeRedis) { *out = *in diff --git a/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/fake/fake_monovertex.go b/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/fake/fake_monovertex.go new file mode 100644 index 0000000000..4e25ccd34b --- /dev/null +++ b/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/fake/fake_monovertex.go @@ -0,0 +1,141 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + v1alpha1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeMonoVertices implements MonoVertexInterface +type FakeMonoVertices struct { + Fake *FakeNumaflowV1alpha1 + ns string +} + +var monoverticesResource = v1alpha1.SchemeGroupVersion.WithResource("monovertices") + +var monoverticesKind = v1alpha1.SchemeGroupVersion.WithKind("MonoVertex") + +// Get takes name of the monoVertex, and returns the corresponding monoVertex object, and an error if there is any. +func (c *FakeMonoVertices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.MonoVertex, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(monoverticesResource, c.ns, name), &v1alpha1.MonoVertex{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.MonoVertex), err +} + +// List takes label and field selectors, and returns the list of MonoVertices that match those selectors. +func (c *FakeMonoVertices) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.MonoVertexList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(monoverticesResource, monoverticesKind, c.ns, opts), &v1alpha1.MonoVertexList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.MonoVertexList{ListMeta: obj.(*v1alpha1.MonoVertexList).ListMeta} + for _, item := range obj.(*v1alpha1.MonoVertexList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested monoVertices. 
+func (c *FakeMonoVertices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(monoverticesResource, c.ns, opts)) + +} + +// Create takes the representation of a monoVertex and creates it. Returns the server's representation of the monoVertex, and an error, if there is any. +func (c *FakeMonoVertices) Create(ctx context.Context, monoVertex *v1alpha1.MonoVertex, opts v1.CreateOptions) (result *v1alpha1.MonoVertex, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(monoverticesResource, c.ns, monoVertex), &v1alpha1.MonoVertex{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.MonoVertex), err +} + +// Update takes the representation of a monoVertex and updates it. Returns the server's representation of the monoVertex, and an error, if there is any. +func (c *FakeMonoVertices) Update(ctx context.Context, monoVertex *v1alpha1.MonoVertex, opts v1.UpdateOptions) (result *v1alpha1.MonoVertex, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(monoverticesResource, c.ns, monoVertex), &v1alpha1.MonoVertex{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.MonoVertex), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeMonoVertices) UpdateStatus(ctx context.Context, monoVertex *v1alpha1.MonoVertex, opts v1.UpdateOptions) (*v1alpha1.MonoVertex, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(monoverticesResource, "status", c.ns, monoVertex), &v1alpha1.MonoVertex{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.MonoVertex), err +} + +// Delete takes name of the monoVertex and deletes it. Returns an error if one occurs. +func (c *FakeMonoVertices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewDeleteActionWithOptions(monoverticesResource, c.ns, name, opts), &v1alpha1.MonoVertex{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeMonoVertices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(monoverticesResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.MonoVertexList{}) + return err +} + +// Patch applies the patch and returns the patched monoVertex. +func (c *FakeMonoVertices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.MonoVertex, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(monoverticesResource, c.ns, name, pt, data, subresources...), &v1alpha1.MonoVertex{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.MonoVertex), err +} diff --git a/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/fake/fake_numaflow_client.go b/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/fake/fake_numaflow_client.go index 33fe35daef..dd960046cd 100644 --- a/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/fake/fake_numaflow_client.go +++ b/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/fake/fake_numaflow_client.go @@ -32,6 +32,10 @@ func (c *FakeNumaflowV1alpha1) InterStepBufferServices(namespace string) v1alpha return &FakeInterStepBufferServices{c, namespace} } +func (c *FakeNumaflowV1alpha1) MonoVertices(namespace string) v1alpha1.MonoVertexInterface { + return &FakeMonoVertices{c, namespace} +} + func (c *FakeNumaflowV1alpha1) Pipelines(namespace string) v1alpha1.PipelineInterface { return &FakePipelines{c, namespace} } diff --git a/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/generated_expansion.go b/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/generated_expansion.go index 7493d3c9be..085924f882 100644 --- 
a/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/generated_expansion.go +++ b/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/generated_expansion.go @@ -20,6 +20,8 @@ package v1alpha1 type InterStepBufferServiceExpansion interface{} +type MonoVertexExpansion interface{} + type PipelineExpansion interface{} type VertexExpansion interface{} diff --git a/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/monovertex.go b/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/monovertex.go new file mode 100644 index 0000000000..674a3bd906 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/monovertex.go @@ -0,0 +1,195 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" + scheme "github.com/numaproj/numaflow/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// MonoVerticesGetter has a method to return a MonoVertexInterface. +// A group's client should implement this interface. +type MonoVerticesGetter interface { + MonoVertices(namespace string) MonoVertexInterface +} + +// MonoVertexInterface has methods to work with MonoVertex resources. 
+type MonoVertexInterface interface { + Create(ctx context.Context, monoVertex *v1alpha1.MonoVertex, opts v1.CreateOptions) (*v1alpha1.MonoVertex, error) + Update(ctx context.Context, monoVertex *v1alpha1.MonoVertex, opts v1.UpdateOptions) (*v1alpha1.MonoVertex, error) + UpdateStatus(ctx context.Context, monoVertex *v1alpha1.MonoVertex, opts v1.UpdateOptions) (*v1alpha1.MonoVertex, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.MonoVertex, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.MonoVertexList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.MonoVertex, err error) + MonoVertexExpansion +} + +// monoVertices implements MonoVertexInterface +type monoVertices struct { + client rest.Interface + ns string +} + +// newMonoVertices returns a MonoVertices +func newMonoVertices(c *NumaflowV1alpha1Client, namespace string) *monoVertices { + return &monoVertices{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the monoVertex, and returns the corresponding monoVertex object, and an error if there is any. +func (c *monoVertices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.MonoVertex, err error) { + result = &v1alpha1.MonoVertex{} + err = c.client.Get(). + Namespace(c.ns). + Resource("monovertices"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of MonoVertices that match those selectors. 
+func (c *monoVertices) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.MonoVertexList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.MonoVertexList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("monovertices"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested monoVertices. +func (c *monoVertices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("monovertices"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a monoVertex and creates it. Returns the server's representation of the monoVertex, and an error, if there is any. +func (c *monoVertices) Create(ctx context.Context, monoVertex *v1alpha1.MonoVertex, opts v1.CreateOptions) (result *v1alpha1.MonoVertex, err error) { + result = &v1alpha1.MonoVertex{} + err = c.client.Post(). + Namespace(c.ns). + Resource("monovertices"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(monoVertex). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a monoVertex and updates it. Returns the server's representation of the monoVertex, and an error, if there is any. +func (c *monoVertices) Update(ctx context.Context, monoVertex *v1alpha1.MonoVertex, opts v1.UpdateOptions) (result *v1alpha1.MonoVertex, err error) { + result = &v1alpha1.MonoVertex{} + err = c.client.Put(). + Namespace(c.ns). + Resource("monovertices"). + Name(monoVertex.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(monoVertex). + Do(ctx). 
+ Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *monoVertices) UpdateStatus(ctx context.Context, monoVertex *v1alpha1.MonoVertex, opts v1.UpdateOptions) (result *v1alpha1.MonoVertex, err error) { + result = &v1alpha1.MonoVertex{} + err = c.client.Put(). + Namespace(c.ns). + Resource("monovertices"). + Name(monoVertex.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(monoVertex). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the monoVertex and deletes it. Returns an error if one occurs. +func (c *monoVertices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("monovertices"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *monoVertices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("monovertices"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched monoVertex. +func (c *monoVertices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.MonoVertex, err error) { + result = &v1alpha1.MonoVertex{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("monovertices"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/numaflow_client.go b/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/numaflow_client.go index c1d76c8d17..395858a189 100644 --- a/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/numaflow_client.go +++ b/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/numaflow_client.go @@ -29,6 +29,7 @@ import ( type NumaflowV1alpha1Interface interface { RESTClient() rest.Interface InterStepBufferServicesGetter + MonoVerticesGetter PipelinesGetter VerticesGetter } @@ -42,6 +43,10 @@ func (c *NumaflowV1alpha1Client) InterStepBufferServices(namespace string) Inter return newInterStepBufferServices(c, namespace) } +func (c *NumaflowV1alpha1Client) MonoVertices(namespace string) MonoVertexInterface { + return newMonoVertices(c, namespace) +} + func (c *NumaflowV1alpha1Client) Pipelines(namespace string) PipelineInterface { return newPipelines(c, namespace) } diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index be620d8076..ab826ef695 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -55,6 +55,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource // Group=numaflow.numaproj.io, Version=v1alpha1 case v1alpha1.SchemeGroupVersion.WithResource("interstepbufferservices"): return &genericInformer{resource: resource.GroupResource(), informer: f.Numaflow().V1alpha1().InterStepBufferServices().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("monovertices"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Numaflow().V1alpha1().MonoVertices().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("pipelines"): return &genericInformer{resource: resource.GroupResource(), informer: f.Numaflow().V1alpha1().Pipelines().Informer()}, nil case 
v1alpha1.SchemeGroupVersion.WithResource("vertices"): diff --git a/pkg/client/informers/externalversions/numaflow/v1alpha1/interface.go b/pkg/client/informers/externalversions/numaflow/v1alpha1/interface.go index 80c042dbc7..d7c5c9fda0 100644 --- a/pkg/client/informers/externalversions/numaflow/v1alpha1/interface.go +++ b/pkg/client/informers/externalversions/numaflow/v1alpha1/interface.go @@ -26,6 +26,8 @@ import ( type Interface interface { // InterStepBufferServices returns a InterStepBufferServiceInformer. InterStepBufferServices() InterStepBufferServiceInformer + // MonoVertices returns a MonoVertexInformer. + MonoVertices() MonoVertexInformer // Pipelines returns a PipelineInformer. Pipelines() PipelineInformer // Vertices returns a VertexInformer. @@ -48,6 +50,11 @@ func (v *version) InterStepBufferServices() InterStepBufferServiceInformer { return &interStepBufferServiceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } +// MonoVertices returns a MonoVertexInformer. +func (v *version) MonoVertices() MonoVertexInformer { + return &monoVertexInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // Pipelines returns a PipelineInformer. func (v *version) Pipelines() PipelineInformer { return &pipelineInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/pkg/client/informers/externalversions/numaflow/v1alpha1/monovertex.go b/pkg/client/informers/externalversions/numaflow/v1alpha1/monovertex.go new file mode 100644 index 0000000000..220d1953ea --- /dev/null +++ b/pkg/client/informers/externalversions/numaflow/v1alpha1/monovertex.go @@ -0,0 +1,90 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + numaflowv1alpha1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" + versioned "github.com/numaproj/numaflow/pkg/client/clientset/versioned" + internalinterfaces "github.com/numaproj/numaflow/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/numaproj/numaflow/pkg/client/listers/numaflow/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// MonoVertexInformer provides access to a shared informer and lister for +// MonoVertices. +type MonoVertexInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.MonoVertexLister +} + +type monoVertexInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewMonoVertexInformer constructs a new informer for MonoVertex type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewMonoVertexInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredMonoVertexInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredMonoVertexInformer constructs a new informer for MonoVertex type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredMonoVertexInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NumaflowV1alpha1().MonoVertices(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NumaflowV1alpha1().MonoVertices(namespace).Watch(context.TODO(), options) + }, + }, + &numaflowv1alpha1.MonoVertex{}, + resyncPeriod, + indexers, + ) +} + +func (f *monoVertexInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredMonoVertexInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *monoVertexInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&numaflowv1alpha1.MonoVertex{}, f.defaultInformer) +} + +func (f *monoVertexInformer) Lister() v1alpha1.MonoVertexLister { + return v1alpha1.NewMonoVertexLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/listers/numaflow/v1alpha1/expansion_generated.go b/pkg/client/listers/numaflow/v1alpha1/expansion_generated.go index 3c6cf065ce..8093700f36 100644 --- a/pkg/client/listers/numaflow/v1alpha1/expansion_generated.go +++ b/pkg/client/listers/numaflow/v1alpha1/expansion_generated.go @@ -26,6 +26,14 @@ type InterStepBufferServiceListerExpansion interface{} // InterStepBufferServiceNamespaceLister. 
type InterStepBufferServiceNamespaceListerExpansion interface{} +// MonoVertexListerExpansion allows custom methods to be added to +// MonoVertexLister. +type MonoVertexListerExpansion interface{} + +// MonoVertexNamespaceListerExpansion allows custom methods to be added to +// MonoVertexNamespaceLister. +type MonoVertexNamespaceListerExpansion interface{} + // PipelineListerExpansion allows custom methods to be added to // PipelineLister. type PipelineListerExpansion interface{} diff --git a/pkg/client/listers/numaflow/v1alpha1/monovertex.go b/pkg/client/listers/numaflow/v1alpha1/monovertex.go new file mode 100644 index 0000000000..a120405445 --- /dev/null +++ b/pkg/client/listers/numaflow/v1alpha1/monovertex.go @@ -0,0 +1,99 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// MonoVertexLister helps list MonoVertices. +// All objects returned here must be treated as read-only. +type MonoVertexLister interface { + // List lists all MonoVertices in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.MonoVertex, err error) + // MonoVertices returns an object that can list and get MonoVertices. 
+ MonoVertices(namespace string) MonoVertexNamespaceLister + MonoVertexListerExpansion +} + +// monoVertexLister implements the MonoVertexLister interface. +type monoVertexLister struct { + indexer cache.Indexer +} + +// NewMonoVertexLister returns a new MonoVertexLister. +func NewMonoVertexLister(indexer cache.Indexer) MonoVertexLister { + return &monoVertexLister{indexer: indexer} +} + +// List lists all MonoVertices in the indexer. +func (s *monoVertexLister) List(selector labels.Selector) (ret []*v1alpha1.MonoVertex, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.MonoVertex)) + }) + return ret, err +} + +// MonoVertices returns an object that can list and get MonoVertices. +func (s *monoVertexLister) MonoVertices(namespace string) MonoVertexNamespaceLister { + return monoVertexNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// MonoVertexNamespaceLister helps list and get MonoVertices. +// All objects returned here must be treated as read-only. +type MonoVertexNamespaceLister interface { + // List lists all MonoVertices in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.MonoVertex, err error) + // Get retrieves the MonoVertex from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.MonoVertex, error) + MonoVertexNamespaceListerExpansion +} + +// monoVertexNamespaceLister implements the MonoVertexNamespaceLister +// interface. +type monoVertexNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all MonoVertices in the indexer for a given namespace. 
+func (s monoVertexNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.MonoVertex, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.MonoVertex)) + }) + return ret, err +} + +// Get retrieves the MonoVertex from the indexer for a given namespace and name. +func (s monoVertexNamespaceLister) Get(name string) (*v1alpha1.MonoVertex, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("monovertex"), name) + } + return obj.(*v1alpha1.MonoVertex), nil +} diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go index 31153dabe8..be3b34aff7 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/metrics.go @@ -28,6 +28,7 @@ const ( LabelISBService = "isbsvc" LabelPipeline = "pipeline" LabelVertex = "vertex" + LabelMonoVertex = "mono_vertex" LabelVertexReplicaIndex = "replica" LabelVertexType = "vertex_type" LabelPartitionName = "partition_name" diff --git a/pkg/mvtxdaemon/server/daemon_server.go b/pkg/mvtxdaemon/server/daemon_server.go new file mode 100644 index 0000000000..29553493e6 --- /dev/null +++ b/pkg/mvtxdaemon/server/daemon_server.go @@ -0,0 +1,165 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package server + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "net/http" + "net/http/pprof" + "os" + "time" + + grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/soheilhy/cmux" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "github.com/numaproj/numaflow" + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" + "github.com/numaproj/numaflow/pkg/apis/proto/daemon" + "github.com/numaproj/numaflow/pkg/shared/logging" + sharedtls "github.com/numaproj/numaflow/pkg/shared/tls" +) + +type daemonServer struct { + monoVtx *v1alpha1.MonoVertex +} + +func NewDaemonServer(monoVtx *v1alpha1.MonoVertex) *daemonServer { + return &daemonServer{ + monoVtx: monoVtx, + } +} + +func (ds *daemonServer) Run(ctx context.Context) error { + log := logging.FromContext(ctx) + var ( + err error + ) + + // Start listener + var conn net.Listener + var listerErr error + address := fmt.Sprintf(":%d", v1alpha1.MonoVertexDaemonServicePort) + conn, err = net.Listen("tcp", address) + if err != nil { + return fmt.Errorf("failed to listen: %v", listerErr) + } + + cer, err := sharedtls.GenerateX509KeyPair() + if err != nil { + return fmt.Errorf("failed to generate cert: %w", err) + } + + tlsConfig := &tls.Config{Certificates: []tls.Certificate{*cer}, MinVersion: tls.VersionTLS12} + grpcServer, err := ds.newGRPCServer() + if err != nil { + return fmt.Errorf("failed to create grpc server: %w", err) + } + httpServer := ds.newHTTPServer(ctx, v1alpha1.DaemonServicePort, tlsConfig) + + conn = tls.NewListener(conn, tlsConfig) + // Cmux is used to support servicing gRPC and HTTP1.1+JSON on the same port + tcpm := cmux.New(conn) + httpL := tcpm.Match(cmux.HTTP1Fast()) + grpcL := tcpm.Match(cmux.Any()) + + go func() { _ = grpcServer.Serve(grpcL) 
}() + go func() { _ = httpServer.Serve(httpL) }() + go func() { _ = tcpm.Serve() }() + + version := numaflow.GetVersion() + mono_vertex_info.WithLabelValues(version.Version, version.Platform, ds.monoVtx.Name).Set(1) + + log.Infof("MonoVertex daemon server started successfully on %s", address) + <-ctx.Done() + return nil +} + +func (ds *daemonServer) newGRPCServer() (*grpc.Server, error) { + // "Prometheus histograms are a great way to measure latency distributions of your RPCs. + // However, since it is a bad practice to have metrics of high cardinality the latency monitoring metrics are disabled by default. + // To enable them please call the following in your server initialization code:" + grpc_prometheus.EnableHandlingTimeHistogram() + + sOpts := []grpc.ServerOption{ + grpc.ConnectionTimeout(300 * time.Second), + grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer( + grpc_prometheus.UnaryServerInterceptor, + )), + } + grpcServer := grpc.NewServer(sOpts...) + grpc_prometheus.Register(grpcServer) + return grpcServer, nil +} + +// newHTTPServer returns the HTTP server to serve HTTP/HTTPS requests. This is implemented +// using grpc-gateway as a proxy to the gRPC server. 
+func (ds *daemonServer) newHTTPServer(ctx context.Context, port int, tlsConfig *tls.Config) *http.Server { + log := logging.FromContext(ctx) + endpoint := fmt.Sprintf(":%d", port) + dialOpts := []grpc.DialOption{ + grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{InsecureSkipVerify: true})), + } + gwMuxOpts := runtime.WithMarshalerOption(runtime.MIMEWildcard, new(runtime.JSONPb)) + gwmux := runtime.NewServeMux(gwMuxOpts, + runtime.WithIncomingHeaderMatcher(func(key string) (string, bool) { + if key == "Connection" { + // Remove "Connection: keep-alive", which is always included in the header of a browser access, + // it will cause "500 Internal Server Error caused by: stream error: stream ID 19; PROTOCOL_ERROR" + return key, false + } + return key, true + }), + ) + if err := daemon.RegisterDaemonServiceHandlerFromEndpoint(ctx, gwmux, endpoint, dialOpts); err != nil { + log.Errorw("Failed to register daemon handler on HTTP Server", zap.Error(err)) + } + mux := http.NewServeMux() + httpServer := http.Server{ + Addr: endpoint, + Handler: mux, + TLSConfig: tlsConfig, + } + mux.Handle("/api/", gwmux) + mux.HandleFunc("/readyz", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNoContent) + }) + mux.HandleFunc("/livez", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNoContent) + }) + mux.Handle("/metrics", promhttp.Handler()) + pprofEnabled := os.Getenv(v1alpha1.EnvDebug) == "true" || os.Getenv(v1alpha1.EnvPPROF) == "true" + if pprofEnabled { + mux.HandleFunc("/debug/pprof/", pprof.Index) + mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + mux.HandleFunc("/debug/pprof/profile", pprof.Profile) + mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + mux.HandleFunc("/debug/pprof/trace", pprof.Trace) + } else { + log.Info("Not enabling pprof debug endpoints") + } + + return &httpServer +} diff --git a/pkg/mvtxdaemon/server/metrics.go b/pkg/mvtxdaemon/server/metrics.go new file mode 100644 index 
0000000000..f0aa155c31 --- /dev/null +++ b/pkg/mvtxdaemon/server/metrics.go @@ -0,0 +1,32 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/numaproj/numaflow/pkg/metrics" +) + +var ( + mono_vertex_info = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Subsystem: "monovtx", + Name: "build_info", + Help: "A metric with a constant value '1', labeled by Numaflow binary version and platform, as well as the mono vertex name", + }, []string{metrics.LabelVersion, metrics.LabelPlatform, metrics.LabelMonoVertex}) +) diff --git a/pkg/reconciler/cmd/start.go b/pkg/reconciler/cmd/start.go index 1b65402431..f7fa5f295f 100644 --- a/pkg/reconciler/cmd/start.go +++ b/pkg/reconciler/cmd/start.go @@ -39,6 +39,7 @@ import ( dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" "github.com/numaproj/numaflow/pkg/reconciler" isbsvcctrl "github.com/numaproj/numaflow/pkg/reconciler/isbsvc" + monovtxctrl "github.com/numaproj/numaflow/pkg/reconciler/monovertex" plctrl "github.com/numaproj/numaflow/pkg/reconciler/pipeline" vertexctrl "github.com/numaproj/numaflow/pkg/reconciler/vertex" "github.com/numaproj/numaflow/pkg/reconciler/vertex/scaling" @@ -194,7 +195,7 @@ func Start(namespaced bool, managedNamespace string) { logger.Fatalw("Unable to watch Services", zap.Error(err)) } - // Watch Deployments with 
Generation changes + // Watch Deployments changes if err := pipelineController.Watch(source.Kind(mgr.GetCache(), &appv1.Deployment{}), handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &dfv1.Pipeline{}, handler.OnlyControllerOwner()), predicate.ResourceVersionChangedPredicate{}); err != nil { @@ -235,6 +236,46 @@ func Start(namespaced bool, managedNamespace string) { logger.Fatalw("Unable to watch Services", zap.Error(err)) } + // MonoVertex controller + monoVertexController, err := controller.New(dfv1.ControllerMonoVertex, mgr, controller.Options{ + Reconciler: monovtxctrl.NewReconciler(mgr.GetClient(), mgr.GetScheme(), config, image, logger, mgr.GetEventRecorderFor(dfv1.ControllerMonoVertex)), + }) + if err != nil { + logger.Fatalw("Unable to set up MonoVertex controller", zap.Error(err)) + } + + // Watch MonoVertices + if err := monoVertexController.Watch(source.Kind(mgr.GetCache(), &dfv1.MonoVertex{}), &handler.EnqueueRequestForObject{}, + predicate.Or( + predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{}, + )); err != nil { + logger.Fatalw("Unable to watch MonoVertices", zap.Error(err)) + } + + // Watch Pods + if err := monoVertexController.Watch(source.Kind(mgr.GetCache(), &corev1.Pod{}), + handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &dfv1.MonoVertex{}, handler.OnlyControllerOwner()), + predicate.ResourceVersionChangedPredicate{}, + predicate.Funcs{ + CreateFunc: func(event.CreateEvent) bool { return false }, // Do not watch pod create events + }); err != nil { + logger.Fatalw("Unable to watch Pods", zap.Error(err)) + } + + // Watch Services with ResourceVersion changes + if err := monoVertexController.Watch(source.Kind(mgr.GetCache(), &corev1.Service{}), + handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &dfv1.MonoVertex{}, handler.OnlyControllerOwner()), + predicate.ResourceVersionChangedPredicate{}); err != nil { + logger.Fatalw("Unable to watch Services", 
zap.Error(err)) + } + + // Watch Deployments changes + if err := monoVertexController.Watch(source.Kind(mgr.GetCache(), &appv1.Deployment{}), + handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &dfv1.MonoVertex{}, handler.OnlyControllerOwner()), + predicate.ResourceVersionChangedPredicate{}); err != nil { + logger.Fatalw("Unable to watch Deployments", zap.Error(err)) + } + // Add autoscaling runner if err := mgr.Add(LeaderElectionRunner(autoscaler.Start)); err != nil { logger.Fatalw("Unable to add autoscaling runner", zap.Error(err)) diff --git a/pkg/reconciler/monovertex/controller.go b/pkg/reconciler/monovertex/controller.go new file mode 100644 index 0000000000..3688d87b60 --- /dev/null +++ b/pkg/reconciler/monovertex/controller.go @@ -0,0 +1,460 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package monovertex + +import ( + "context" + "fmt" + "strconv" + "strings" + "time" + + "go.uber.org/zap" + appv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" + "github.com/numaproj/numaflow/pkg/reconciler" + "github.com/numaproj/numaflow/pkg/shared/logging" + sharedutil "github.com/numaproj/numaflow/pkg/shared/util" +) + +// monoVertexReconciler reconciles a MonoVertex object. +type monoVertexReconciler struct { + client client.Client + scheme *runtime.Scheme + + config *reconciler.GlobalConfig + image string + logger *zap.SugaredLogger + + recorder record.EventRecorder +} + +func NewReconciler(client client.Client, scheme *runtime.Scheme, config *reconciler.GlobalConfig, image string, logger *zap.SugaredLogger, recorder record.EventRecorder) reconcile.Reconciler { + return &monoVertexReconciler{client: client, scheme: scheme, config: config, image: image, logger: logger, recorder: recorder} +} + +func (mr *monoVertexReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + monoVtx := &dfv1.MonoVertex{} + if err := mr.client.Get(ctx, req.NamespacedName, monoVtx); err != nil { + if apierrors.IsNotFound(err) { + return reconcile.Result{}, nil + } + mr.logger.Errorw("Unable to get MonoVertex", zap.Any("request", req), zap.Error(err)) + return ctrl.Result{}, err + } + log := mr.logger.With("namespace", monoVtx.Namespace).With("monoVertex", monoVtx.Name) + ctx = logging.WithLogger(ctx, log) + monoVtxCopy := monoVtx.DeepCopy() + result, err := 
mr.reconcile(ctx, monoVtxCopy) + if err != nil { + log.Errorw("Reconcile error", zap.Error(err)) + } + + if !equality.Semantic.DeepEqual(monoVtx.Status, monoVtxCopy.Status) { + if err := mr.client.Status().Update(ctx, monoVtxCopy); err != nil { + return reconcile.Result{}, err + } + } + return result, err +} + +// reconcile does the real logic. +func (mr *monoVertexReconciler) reconcile(ctx context.Context, monoVtx *dfv1.MonoVertex) (ctrl.Result, error) { + log := logging.FromContext(ctx) + if !monoVtx.DeletionTimestamp.IsZero() { + log.Info("Deleting mono vertex") + return ctrl.Result{}, nil + } + + monoVtx.Status.SetObservedGeneration(monoVtx.Generation) + + // TODO: handle lifecycle changes + + // Regular mono vertex change + result, err := mr.reconcileNonLifecycleChanges(ctx, monoVtx) + if err != nil { + mr.recorder.Eventf(monoVtx, corev1.EventTypeWarning, "ReconcileMonoVertexFailed", "Failed to reconcile a mono vertex: %v", err.Error()) + } + return result, err +} + +func (mr *monoVertexReconciler) reconcileNonLifecycleChanges(ctx context.Context, monoVtx *dfv1.MonoVertex) (ctrl.Result, error) { + // Create or update mono vtx services + if err := mr.createOrUpdateMonoVtxServices(ctx, monoVtx); err != nil { + return ctrl.Result{}, err + } + + // Mono vtx daemon service + if err := mr.createOrUpdateDaemonService(ctx, monoVtx); err != nil { + return ctrl.Result{}, err + } + + // Mono vtx daemon deployment + if err := mr.createOrUpdateDaemonDeployment(ctx, monoVtx); err != nil { + return ctrl.Result{}, err + } + + // Create pods + if err := mr.reconcilePods(ctx, monoVtx); err != nil { + return ctrl.Result{}, err + } + monoVtx.Status.MarkDeployed() + + // Mark it running before checking the status of the pods + monoVtx.Status.MarkPhaseRunning() + + // Check children resource status + if err := mr.checkChildrenResourceStatus(ctx, monoVtx); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to check mono vertex children resource status, %w", err) + } + return 
ctrl.Result{}, nil +} + +func (mr *monoVertexReconciler) reconcilePods(ctx context.Context, monoVtx *dfv1.MonoVertex) error { + log := logging.FromContext(ctx) + existingPods, err := mr.findExistingPods(ctx, monoVtx) + if err != nil { + mr.markDeploymentFailedAndLogEvent(monoVtx, false, log, "FindExistingPodFailed", err.Error(), "Failed to find existing mono vertex pods", zap.Error(err)) + return err + } + desiredReplicas := monoVtx.GetReplicas() + for replica := 0; replica < desiredReplicas; replica++ { + podSpec, err := mr.buildPodSpec(monoVtx) + if err != nil { + mr.markDeploymentFailedAndLogEvent(monoVtx, false, log, "PodSpecGenFailed", err.Error(), "Failed to generate mono vertex pod spec", zap.Error(err)) + return err + } + hash := sharedutil.MustHash(podSpec) + podNamePrefix := fmt.Sprintf("%s-mv-%d-", monoVtx.Name, replica) + needToCreate := true + for existingPodName, existingPod := range existingPods { + if strings.HasPrefix(existingPodName, podNamePrefix) { + if existingPod.GetAnnotations()[dfv1.KeyHash] == hash && existingPod.Status.Phase != corev1.PodFailed { + needToCreate = false + delete(existingPods, existingPodName) + } + break + } + } + if needToCreate { + labels := map[string]string{} + annotations := map[string]string{} + if x := monoVtx.Spec.Metadata; x != nil { + for k, v := range x.Annotations { + annotations[k] = v + } + for k, v := range x.Labels { + labels[k] = v + } + } + labels[dfv1.KeyPartOf] = dfv1.Project + labels[dfv1.KeyManagedBy] = dfv1.ControllerMonoVertex + labels[dfv1.KeyComponent] = dfv1.ComponentMonoVertex + labels[dfv1.KeyAppName] = monoVtx.Name + labels[dfv1.KeyMonoVertexName] = monoVtx.Name + annotations[dfv1.KeyHash] = hash + annotations[dfv1.KeyReplica] = strconv.Itoa(replica) + // Defaults to udf + annotations[dfv1.KeyDefaultContainer] = dfv1.CtrMain + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: monoVtx.Namespace, + Name: podNamePrefix + sharedutil.RandomLowerCaseString(5), + Labels: labels, + 
Annotations: annotations, + OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(monoVtx.GetObjectMeta(), dfv1.MonoVertexGroupVersionKind)}, + }, + Spec: *podSpec, + } + pod.Spec.Hostname = fmt.Sprintf("%s-mv-%d", monoVtx.Name, replica) + if err := mr.client.Create(ctx, pod); err != nil { + mr.markDeploymentFailedAndLogEvent(monoVtx, true, log, "CreatePodFailed", err.Error(), "Failed to created a mono vertex pod", zap.Error(err)) + return err + } + log.Infow("Succeeded to create a mono vertex pod", zap.String("pod", pod.Name)) + mr.recorder.Eventf(monoVtx, corev1.EventTypeNormal, "CreatePodSuccess", "Succeeded to create a mono vertex pod %s", pod.Name) + } + } + for _, v := range existingPods { + if err := mr.client.Delete(ctx, &v); err != nil && !apierrors.IsNotFound(err) { + mr.markDeploymentFailedAndLogEvent(monoVtx, true, log, "DelPodFailed", err.Error(), "Failed to delete a mono vertex pod", zap.Error(err)) + return err + } + } + + currentReplicas := int(monoVtx.Status.Replicas) + if currentReplicas != desiredReplicas || monoVtx.Status.Selector == "" { + log.Infow("MonoVertex replicas changed", "currentReplicas", currentReplicas, "desiredReplicas", desiredReplicas) + mr.recorder.Eventf(monoVtx, corev1.EventTypeNormal, "ReplicasScaled", "Replicas changed from %d to %d", currentReplicas, desiredReplicas) + monoVtx.Status.Replicas = uint32(desiredReplicas) + monoVtx.Status.LastScaledAt = metav1.Time{Time: time.Now()} + } + selector, _ := labels.Parse(dfv1.KeyComponent + "=" + dfv1.ComponentMonoVertex + "," + dfv1.KeyMonoVertexName + "=" + monoVtx.Name) + monoVtx.Status.Selector = selector.String() + + return nil +} + +func (mr *monoVertexReconciler) createOrUpdateMonoVtxServices(ctx context.Context, monoVtx *dfv1.MonoVertex) error { + log := logging.FromContext(ctx) + existingSvcs, err := mr.findExistingMonoVtxServices(ctx, monoVtx) + if err != nil { + log.Errorw("Failed to find existing MonoVertex services", zap.Error(err)) + 
monoVtx.Status.MarkPhaseFailed("FindExistingSvcsFailed", err.Error()) + return err + } + for _, s := range monoVtx.GetServiceObjs() { + svcHash := sharedutil.MustHash(s.Spec) + s.Annotations = map[string]string{dfv1.KeyHash: svcHash} + needToCreate := false + if existingSvc, existing := existingSvcs[s.Name]; existing { + if existingSvc.GetAnnotations()[dfv1.KeyHash] != svcHash { + if err := mr.client.Delete(ctx, &existingSvc); err != nil { + if !apierrors.IsNotFound(err) { + mr.markDeploymentFailedAndLogEvent(monoVtx, true, log, "DelSvcFailed", err.Error(), "Failed to delete existing mono vertex service", zap.String("service", existingSvc.Name), zap.Error(err)) + return err + } + } else { + log.Infow("Deleted a stale mono vertex service to recreate", zap.String("service", existingSvc.Name)) + mr.recorder.Eventf(monoVtx, corev1.EventTypeNormal, "DelSvcSuccess", "Deleted stale mono vertex service %s to recreate", existingSvc.Name) + } + needToCreate = true + } + delete(existingSvcs, s.Name) + } else { + needToCreate = true + } + if needToCreate { + if err := mr.client.Create(ctx, s); err != nil { + if apierrors.IsAlreadyExists(err) { + continue + } + mr.markDeploymentFailedAndLogEvent(monoVtx, true, log, "CreateSvcFailed", err.Error(), "Failed to create a mono vertex service", zap.String("service", s.Name), zap.Error(err)) + return err + } else { + log.Infow("Succeeded to create a mono vertex service", zap.String("service", s.Name)) + mr.recorder.Eventf(monoVtx, corev1.EventTypeNormal, "CreateSvcSuccess", "Succeeded to create mono vertex service %s", s.Name) + } + } + } + for _, v := range existingSvcs { // clean up stale services + if err := mr.client.Delete(ctx, &v); err != nil { + if !apierrors.IsNotFound(err) { + mr.markDeploymentFailedAndLogEvent(monoVtx, true, log, "DelSvcFailed", err.Error(), "Failed to delete mono vertex service not in use", zap.String("service", v.Name), zap.Error(err)) + return err + } + } else { + log.Infow("Deleted a stale mono vertx 
service", zap.String("service", v.Name)) + mr.recorder.Eventf(monoVtx, corev1.EventTypeNormal, "DelSvcSuccess", "Deleted stale mono vertex service %s", v.Name) + } + } + return nil +} + +func (mr *monoVertexReconciler) findExistingMonoVtxServices(ctx context.Context, monoVtx *dfv1.MonoVertex) (map[string]corev1.Service, error) { + svcs := &corev1.ServiceList{} + selector, _ := labels.Parse(dfv1.KeyComponent + "=" + dfv1.ComponentMonoVertex + "," + dfv1.KeyMonoVertexName + "=" + monoVtx.Name) + if err := mr.client.List(ctx, svcs, &client.ListOptions{Namespace: monoVtx.Namespace, LabelSelector: selector}); err != nil { + return nil, fmt.Errorf("failed to list Mono Vertex services: %w", err) + } + result := make(map[string]corev1.Service) + for _, v := range svcs.Items { + result[v.Name] = v + } + return result, nil +} + +func (mr *monoVertexReconciler) createOrUpdateDaemonService(ctx context.Context, monoVtx *dfv1.MonoVertex) error { + log := logging.FromContext(ctx) + svc := monoVtx.GetDaemonServiceObj() + svcHash := sharedutil.MustHash(svc.Spec) + svc.Annotations = map[string]string{dfv1.KeyHash: svcHash} + existingSvc := &corev1.Service{} + needToCreatDaemonSvc := false + if err := mr.client.Get(ctx, types.NamespacedName{Namespace: monoVtx.Namespace, Name: svc.Name}, existingSvc); err != nil { + if apierrors.IsNotFound(err) { + needToCreatDaemonSvc = true + } else { + mr.markDeploymentFailedAndLogEvent(monoVtx, false, log, "FindDaemonSvcFailed", err.Error(), "Failed to find existing mono vtx daemon service", zap.String("service", svc.Name), zap.Error(err)) + return err + } + } else if existingSvc.GetAnnotations()[dfv1.KeyHash] != svcHash { + if err := mr.client.Delete(ctx, existingSvc); err != nil && !apierrors.IsNotFound(err) { + mr.markDeploymentFailedAndLogEvent(monoVtx, true, log, "DelDaemonSvcFailed", err.Error(), "Failed to delete existing mono vtx daemon service", zap.String("service", existingSvc.Name), zap.Error(err)) + return err + } + 
needToCreatDaemonSvc = true + } + if needToCreatDaemonSvc { + if err := mr.client.Create(ctx, svc); err != nil { + mr.markDeploymentFailedAndLogEvent(monoVtx, true, log, "CreateDaemonSvcFailed", err.Error(), "Failed to create mono vtx daemon service", zap.String("service", svc.Name), zap.Error(err)) + return err + } + log.Infow("Succeeded to create a mono vertex daemon service", zap.String("service", svc.Name)) + mr.recorder.Eventf(monoVtx, corev1.EventTypeNormal, "CreateMonoVtxDaemonSvcSuccess", "Succeeded to create a mono vertex daemon service %s", svc.Name) + } + return nil +} + +func (mr *monoVertexReconciler) createOrUpdateDaemonDeployment(ctx context.Context, monoVtx *dfv1.MonoVertex) error { + log := logging.FromContext(ctx) + envs := []corev1.EnvVar{{Name: dfv1.EnvMonoVertexName, Value: monoVtx.Name}} + + req := dfv1.GetMonoVertexDaemonDeploymentReq{ + Image: mr.image, + PullPolicy: corev1.PullPolicy(sharedutil.LookupEnvStringOr(dfv1.EnvImagePullPolicy, "")), + Env: envs, + DefaultResources: mr.config.GetDefaults().GetDefaultContainerResources(), + } + deploy, err := monoVtx.GetDaemonDeploymentObj(req) + if err != nil { + mr.markDeploymentFailedAndLogEvent(monoVtx, false, log, "BuildDaemonDeployFailed", err.Error(), "Failed to build mono verex daemon deployment spec", zap.Error(err)) + return err + } + deployHash := sharedutil.MustHash(deploy.Spec) + deploy.Annotations = map[string]string{dfv1.KeyHash: deployHash} + existingDeploy := &appv1.Deployment{} + needToCreate := false + if err := mr.client.Get(ctx, types.NamespacedName{Namespace: monoVtx.Namespace, Name: deploy.Name}, existingDeploy); err != nil { + if !apierrors.IsNotFound(err) { + mr.markDeploymentFailedAndLogEvent(monoVtx, false, log, "FindDaemonDeployFailed", err.Error(), "Failed to find existing mono vertex daemon deployment", zap.String("deployment", deploy.Name), zap.Error(err)) + return err + } else { + needToCreate = true + } + } else { + if existingDeploy.GetAnnotations()[dfv1.KeyHash] != 
deployHash { + // Delete and recreate, to avoid updating immutable fields problem. + if err := mr.client.Delete(ctx, existingDeploy); err != nil { + mr.markDeploymentFailedAndLogEvent(monoVtx, true, log, "DeleteOldDaemonDeployFailed", err.Error(), "Failed to delete the outdated daemon deployment", zap.String("deployment", existingDeploy.Name), zap.Error(err)) + return err + } + needToCreate = true + } + } + if needToCreate { + if err := mr.client.Create(ctx, deploy); err != nil && !apierrors.IsAlreadyExists(err) { + mr.markDeploymentFailedAndLogEvent(monoVtx, true, log, "CreateDaemonDeployFailed", err.Error(), "Failed to create a mono vertex daemon deployment", zap.String("deployment", deploy.Name), zap.Error(err)) + return err + } + log.Infow("Succeeded to create/recreate a mono vertex daemon deployment", zap.String("deployment", deploy.Name)) + mr.recorder.Eventf(monoVtx, corev1.EventTypeNormal, "CreateDaemonDeploySuccess", "Succeeded to create/recreate a mono vertex daemon deployment %s", deploy.Name) + } + return nil +} + +func (r *monoVertexReconciler) findExistingPods(ctx context.Context, monoVtx *dfv1.MonoVertex) (map[string]corev1.Pod, error) { + pods := &corev1.PodList{} + selector, _ := labels.Parse(dfv1.KeyComponent + "=" + dfv1.ComponentMonoVertex + "," + dfv1.KeyMonoVertexName + "=" + monoVtx.Name) + if err := r.client.List(ctx, pods, &client.ListOptions{Namespace: monoVtx.Namespace, LabelSelector: selector}); err != nil { + return nil, fmt.Errorf("failed to list mono vertex pods: %w", err) + } + result := make(map[string]corev1.Pod) + for _, v := range pods.Items { + if !v.DeletionTimestamp.IsZero() { + // Ignore pods being deleted + continue + } + result[v.Name] = v + } + return result, nil +} + +func (mr *monoVertexReconciler) buildPodSpec(monoVtx *dfv1.MonoVertex) (*corev1.PodSpec, error) { + podSpec, err := monoVtx.GetPodSpec(dfv1.GetMonoVertexPodSpecReq{ + Image: mr.image, + PullPolicy: 
corev1.PullPolicy(sharedutil.LookupEnvStringOr(dfv1.EnvImagePullPolicy, "")), + DefaultResources: mr.config.GetDefaults().GetDefaultContainerResources(), + }) + if err != nil { + return nil, fmt.Errorf("failed to generate mono vertex pod spec, error: %w", err) + } + + // Attach secret or configmap volumes if any + vols, volMounts := sharedutil.VolumesFromSecretsAndConfigMaps(monoVtx) + podSpec.Volumes = append(podSpec.Volumes, vols...) + podSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, volMounts...) + return podSpec, nil +} + +// Helper function for warning event types +func (mr *monoVertexReconciler) markDeploymentFailedAndLogEvent(monoVtx *dfv1.MonoVertex, recordEvent bool, log *zap.SugaredLogger, reason, message, logMsg string, logWith ...interface{}) { + log.Errorw(logMsg, logWith) + monoVtx.Status.MarkDeployFailed(reason, message) + if recordEvent { + mr.recorder.Event(monoVtx, corev1.EventTypeWarning, reason, message) + } +} + +// checkChildrenResourceStatus checks the status of the children resources of the mono vertex +func (mr *monoVertexReconciler) checkChildrenResourceStatus(ctx context.Context, monoVtx *dfv1.MonoVertex) error { + defer func() { + for _, c := range monoVtx.Status.Conditions { + if c.Status != metav1.ConditionTrue { + monoVtx.Status.Message = "Degraded: " + c.Message + monoVtx.Status.Reason = "ChildResourceUnhealthy" + return + } + } + }() + + // get the mono vertex daemon deployment and update the status of it to the pipeline + var daemonDeployment appv1.Deployment + if err := mr.client.Get(ctx, client.ObjectKey{Namespace: monoVtx.GetNamespace(), Name: monoVtx.GetDaemonDeploymentName()}, + &daemonDeployment); err != nil { + if apierrors.IsNotFound(err) { + monoVtx.Status.MarkDaemonUnHealthy( + "GetDaemonServiceFailed", "Deployment not found, might be still under creation") + return nil + } + monoVtx.Status.MarkDaemonUnHealthy("GetDaemonServiceFailed", err.Error()) + return err + } + if status, reason, msg 
:= reconciler.CheckDeploymentStatus(&daemonDeployment); status { + monoVtx.Status.MarkDaemonHealthy() + } else { + monoVtx.Status.MarkDaemonUnHealthy(reason, msg) + } + + // Check status of the pods + selector, _ := labels.Parse(dfv1.KeyComponent + "=" + dfv1.ComponentMonoVertex + "," + dfv1.KeyMonoVertexName + "=" + monoVtx.Name) + var podList corev1.PodList + if err := mr.client.List(ctx, &podList, &client.ListOptions{Namespace: monoVtx.GetNamespace(), LabelSelector: selector}); err != nil { + monoVtx.Status.MarkPodNotHealthy("ListMonoVerticesPodsFailed", err.Error()) + return fmt.Errorf("failed to get pods of a vertex: %w", err) + } + if healthy, reason, msg := reconciler.CheckVertexPodsStatus(&podList); healthy { + monoVtx.Status.MarkPodHealthy(reason, msg) + } else { + // Do not need to explicitly requeue, since the it keeps watching the status change of the pods + monoVtx.Status.MarkPodNotHealthy(reason, msg) + } + + return nil +} diff --git a/pkg/reconciler/monovertex/controller_test.go b/pkg/reconciler/monovertex/controller_test.go new file mode 100644 index 0000000000..d84ddef68a --- /dev/null +++ b/pkg/reconciler/monovertex/controller_test.go @@ -0,0 +1,17 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package monovertex diff --git a/pkg/reconciler/pipeline/controller.go b/pkg/reconciler/pipeline/controller.go index cffd45b93c..c9d824e941 100644 --- a/pkg/reconciler/pipeline/controller.go +++ b/pkg/reconciler/pipeline/controller.go @@ -623,7 +623,7 @@ func buildVertices(pl *dfv1.Pipeline) map[string]dfv1.Vertex { Watermark: pl.Spec.Watermark, Replicas: &replicas, } - hash := sharedutil.MustHash(spec.WithOutReplicas()) + hash := sharedutil.MustHash(spec.DeepCopyWithoutReplicas()) obj := dfv1.Vertex{ ObjectMeta: metav1.ObjectMeta{ Namespace: pl.Namespace, @@ -946,6 +946,7 @@ func (r *pipelineReconciler) checkChildrenResourceStatus(ctx context.Context, pi for _, c := range pipeline.Status.Conditions { if c.Status != metav1.ConditionTrue { pipeline.Status.SetPhase(pipeline.Spec.Lifecycle.GetDesiredPhase(), "Degraded: "+c.Message) + return } } }() diff --git a/pkg/reconciler/util.go b/pkg/reconciler/util.go index a9d58871b4..f12573ab08 100644 --- a/pkg/reconciler/util.go +++ b/pkg/reconciler/util.go @@ -48,6 +48,9 @@ func isPodHealthy(pod *corev1.Pod) (healthy bool, reason string) { if c.State.Waiting != nil && c.State.Waiting.Reason == "CrashLoopBackOff" { return false, c.State.Waiting.Reason } + if c.State.Terminated != nil && c.State.Terminated.Reason == "Error" { + return false, c.State.Terminated.Reason + } } return true, "" } diff --git a/pkg/reconciler/vertex/controller.go b/pkg/reconciler/vertex/controller.go index a4a84d1e5e..b88b3b94f8 100644 --- a/pkg/reconciler/vertex/controller.go +++ b/pkg/reconciler/vertex/controller.go @@ -313,7 +313,7 @@ func (r *vertexReconciler) reconcile(ctx context.Context, vertex *dfv1.Vertex) ( currentReplicas := int(vertex.Status.Replicas) if currentReplicas != desiredReplicas || vertex.Status.Selector == "" { - log.Infow("Replicas changed", "currentReplicas", currentReplicas, "desiredReplicas", desiredReplicas) + log.Infow("Pipeline Vertex replicas changed", "currentReplicas", currentReplicas, "desiredReplicas", 
desiredReplicas) r.recorder.Eventf(vertex, corev1.EventTypeNormal, "ReplicasScaled", "Replicas changed from %d to %d", currentReplicas, desiredReplicas) vertex.Status.Replicas = uint32(desiredReplicas) vertex.Status.LastScaledAt = metav1.Time{Time: time.Now()} diff --git a/serving/numaflow-models/src/models/get_mono_vertex_daemon_deployment_req.rs b/serving/numaflow-models/src/models/get_mono_vertex_daemon_deployment_req.rs new file mode 100644 index 0000000000..6888bcf9cb --- /dev/null +++ b/serving/numaflow-models/src/models/get_mono_vertex_daemon_deployment_req.rs @@ -0,0 +1,37 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct GetMonoVertexDaemonDeploymentReq { + #[serde(rename = "DefaultResources")] + pub default_resources: k8s_openapi::api::core::v1::ResourceRequirements, + #[serde(rename = "Env")] + pub env: Vec, + #[serde(rename = "Image")] + pub image: String, + #[serde(rename = "PullPolicy")] + pub pull_policy: String, +} + +impl GetMonoVertexDaemonDeploymentReq { + pub fn new( + default_resources: k8s_openapi::api::core::v1::ResourceRequirements, + env: Vec, + image: String, + pull_policy: String, + ) -> GetMonoVertexDaemonDeploymentReq { + GetMonoVertexDaemonDeploymentReq { + default_resources, + env, + image, + pull_policy, + } + } +} diff --git a/serving/numaflow-models/src/models/get_mono_vertex_pod_spec_req.rs b/serving/numaflow-models/src/models/get_mono_vertex_pod_spec_req.rs new file mode 100644 index 0000000000..18ba8d4ea5 --- /dev/null +++ b/serving/numaflow-models/src/models/get_mono_vertex_pod_spec_req.rs @@ -0,0 +1,37 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the 
OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct GetMonoVertexPodSpecReq { + #[serde(rename = "DefaultResources")] + pub default_resources: k8s_openapi::api::core::v1::ResourceRequirements, + #[serde(rename = "Env")] + pub env: Vec, + #[serde(rename = "Image")] + pub image: String, + #[serde(rename = "PullPolicy")] + pub pull_policy: String, +} + +impl GetMonoVertexPodSpecReq { + pub fn new( + default_resources: k8s_openapi::api::core::v1::ResourceRequirements, + env: Vec, + image: String, + pull_policy: String, + ) -> GetMonoVertexPodSpecReq { + GetMonoVertexPodSpecReq { + default_resources, + env, + image, + pull_policy, + } + } +} diff --git a/serving/numaflow-models/src/models/mod.rs b/serving/numaflow-models/src/models/mod.rs index 5725454d44..648964f907 100644 --- a/serving/numaflow-models/src/models/mod.rs +++ b/serving/numaflow-models/src/models/mod.rs @@ -40,6 +40,10 @@ pub mod get_jet_stream_service_spec_req; pub use self::get_jet_stream_service_spec_req::GetJetStreamServiceSpecReq; pub mod get_jet_stream_stateful_set_spec_req; pub use self::get_jet_stream_stateful_set_spec_req::GetJetStreamStatefulSetSpecReq; +pub mod get_mono_vertex_daemon_deployment_req; +pub use self::get_mono_vertex_daemon_deployment_req::GetMonoVertexDaemonDeploymentReq; +pub mod get_mono_vertex_pod_spec_req; +pub use self::get_mono_vertex_pod_spec_req::GetMonoVertexPodSpecReq; pub mod get_redis_service_spec_req; pub use self::get_redis_service_spec_req::GetRedisServiceSpecReq; pub mod get_redis_stateful_set_spec_req; @@ -82,6 +86,16 @@ pub mod log; pub use self::log::Log; pub mod metadata; pub use self::metadata::Metadata; +pub mod mono_vertex; +pub use self::mono_vertex::MonoVertex; +pub mod mono_vertex_limits; +pub use self::mono_vertex_limits::MonoVertexLimits; +pub mod mono_vertex_list; +pub use self::mono_vertex_list::MonoVertexList; +pub mod mono_vertex_spec; +pub use 
self::mono_vertex_spec::MonoVertexSpec; +pub mod mono_vertex_status; +pub use self::mono_vertex_status::MonoVertexStatus; pub mod native_redis; pub use self::native_redis::NativeRedis; pub mod nats_auth; diff --git a/serving/numaflow-models/src/models/mono_vertex.rs b/serving/numaflow-models/src/models/mono_vertex.rs new file mode 100644 index 0000000000..3d66a61036 --- /dev/null +++ b/serving/numaflow-models/src/models/mono_vertex.rs @@ -0,0 +1,37 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct MonoVertex { + /// APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + #[serde(rename = "apiVersion", skip_serializing_if = "Option::is_none")] + pub api_version: Option, + /// Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + #[serde(rename = "kind", skip_serializing_if = "Option::is_none")] + pub kind: Option, + #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] + pub metadata: Option, + #[serde(rename = "spec")] + pub spec: Box, + #[serde(rename = "status", skip_serializing_if = "Option::is_none")] + pub status: Option>, +} + +impl MonoVertex { + pub fn new(spec: crate::models::MonoVertexSpec) -> MonoVertex { + MonoVertex { + api_version: None, + kind: None, + metadata: None, + spec: Box::new(spec), + status: None, + } + } +} diff --git a/serving/numaflow-models/src/models/mono_vertex_limits.rs b/serving/numaflow-models/src/models/mono_vertex_limits.rs new file mode 100644 index 0000000000..0a8814f50f --- /dev/null +++ b/serving/numaflow-models/src/models/mono_vertex_limits.rs @@ -0,0 +1,27 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct MonoVertexLimits { + /// Read batch size from the source. 
+ #[serde(rename = "readBatchSize", skip_serializing_if = "Option::is_none")] + pub read_batch_size: Option, + #[serde(rename = "readTimeout", skip_serializing_if = "Option::is_none")] + pub read_timeout: Option, +} + +impl MonoVertexLimits { + pub fn new() -> MonoVertexLimits { + MonoVertexLimits { + read_batch_size: None, + read_timeout: None, + } + } +} diff --git a/serving/numaflow-models/src/models/mono_vertex_list.rs b/serving/numaflow-models/src/models/mono_vertex_list.rs new file mode 100644 index 0000000000..78bb9414f9 --- /dev/null +++ b/serving/numaflow-models/src/models/mono_vertex_list.rs @@ -0,0 +1,34 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct MonoVertexList { + /// APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + #[serde(rename = "apiVersion", skip_serializing_if = "Option::is_none")] + pub api_version: Option, + #[serde(rename = "items")] + pub items: Vec, + /// Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + #[serde(rename = "kind", skip_serializing_if = "Option::is_none")] + pub kind: Option, + #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] + pub metadata: Option, +} + +impl MonoVertexList { + pub fn new(items: Vec) -> MonoVertexList { + MonoVertexList { + api_version: None, + items, + kind: None, + metadata: None, + } + } +} diff --git a/serving/numaflow-models/src/models/mono_vertex_spec.rs b/serving/numaflow-models/src/models/mono_vertex_spec.rs new file mode 100644 index 0000000000..7890717058 --- /dev/null +++ b/serving/numaflow-models/src/models/mono_vertex_spec.rs @@ -0,0 +1,103 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct MonoVertexSpec { + #[serde(rename = "affinity", skip_serializing_if = "Option::is_none")] + pub affinity: Option, + /// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + #[serde( + rename = "automountServiceAccountToken", + skip_serializing_if = "Option::is_none" + )] + pub automount_service_account_token: Option, + #[serde(rename = "containerTemplate", skip_serializing_if = "Option::is_none")] + pub container_template: Option>, + #[serde(rename = "daemonTemplate", skip_serializing_if = "Option::is_none")] + pub daemon_template: Option>, + #[serde(rename = "dnsConfig", skip_serializing_if = "Option::is_none")] + pub dns_config: Option, + /// Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. 
To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + #[serde(rename = "dnsPolicy", skip_serializing_if = "Option::is_none")] + pub dns_policy: Option, + /// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + #[serde(rename = "imagePullSecrets", skip_serializing_if = "Option::is_none")] + pub image_pull_secrets: Option>, + /// List of customized init containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + #[serde(rename = "initContainers", skip_serializing_if = "Option::is_none")] + pub init_containers: Option>, + #[serde(rename = "limits", skip_serializing_if = "Option::is_none")] + pub limits: Option>, + #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] + pub metadata: Option>, + /// NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + #[serde(rename = "nodeSelector", skip_serializing_if = "Option::is_none")] + pub node_selector: Option<::std::collections::HashMap>, + /// The priority value. Various system components use this field to find the priority of the Redis pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. 
More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + #[serde(rename = "priority", skip_serializing_if = "Option::is_none")] + pub priority: Option, + /// If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + #[serde(rename = "priorityClassName", skip_serializing_if = "Option::is_none")] + pub priority_class_name: Option, + #[serde(rename = "replicas", skip_serializing_if = "Option::is_none")] + pub replicas: Option, + /// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + #[serde(rename = "runtimeClassName", skip_serializing_if = "Option::is_none")] + pub runtime_class_name: Option, + #[serde(rename = "scale", skip_serializing_if = "Option::is_none")] + pub scale: Option>, + #[serde(rename = "securityContext", skip_serializing_if = "Option::is_none")] + pub security_context: Option, + /// ServiceAccountName applied to the pod + #[serde(rename = "serviceAccountName", skip_serializing_if = "Option::is_none")] + pub service_account_name: Option, + /// List of customized sidecar containers belonging to the pod. 
+ #[serde(rename = "sidecars", skip_serializing_if = "Option::is_none")] + pub sidecars: Option>, + #[serde(rename = "sink", skip_serializing_if = "Option::is_none")] + pub sink: Option>, + #[serde(rename = "source", skip_serializing_if = "Option::is_none")] + pub source: Option>, + /// If specified, the pod's tolerations. + #[serde(rename = "tolerations", skip_serializing_if = "Option::is_none")] + pub tolerations: Option>, + #[serde(rename = "volumes", skip_serializing_if = "Option::is_none")] + pub volumes: Option>, +} + +impl MonoVertexSpec { + pub fn new() -> MonoVertexSpec { + MonoVertexSpec { + affinity: None, + automount_service_account_token: None, + container_template: None, + daemon_template: None, + dns_config: None, + dns_policy: None, + image_pull_secrets: None, + init_containers: None, + limits: None, + metadata: None, + node_selector: None, + priority: None, + priority_class_name: None, + replicas: None, + runtime_class_name: None, + scale: None, + security_context: None, + service_account_name: None, + sidecars: None, + sink: None, + source: None, + tolerations: None, + volumes: None, + } + } +} diff --git a/serving/numaflow-models/src/models/mono_vertex_status.rs b/serving/numaflow-models/src/models/mono_vertex_status.rs new file mode 100644 index 0000000000..53695fd13a --- /dev/null +++ b/serving/numaflow-models/src/models/mono_vertex_status.rs @@ -0,0 +1,48 @@ +/* + * Numaflow + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: latest + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct MonoVertexStatus { + /// Conditions are the latest available observations of a resource's current state. 
+ #[serde(rename = "conditions", skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + #[serde(rename = "lastScaledAt", skip_serializing_if = "Option::is_none")] + pub last_scaled_at: Option, + #[serde(rename = "lastUpdated", skip_serializing_if = "Option::is_none")] + pub last_updated: Option, + #[serde(rename = "message", skip_serializing_if = "Option::is_none")] + pub message: Option, + #[serde(rename = "observedGeneration", skip_serializing_if = "Option::is_none")] + pub observed_generation: Option, + #[serde(rename = "phase", skip_serializing_if = "Option::is_none")] + pub phase: Option, + #[serde(rename = "reason", skip_serializing_if = "Option::is_none")] + pub reason: Option, + #[serde(rename = "replicas")] + pub replicas: i64, + #[serde(rename = "selector", skip_serializing_if = "Option::is_none")] + pub selector: Option, +} + +impl MonoVertexStatus { + pub fn new(replicas: i64) -> MonoVertexStatus { + MonoVertexStatus { + conditions: None, + last_scaled_at: None, + last_updated: None, + message: None, + observed_generation: None, + phase: None, + reason: None, + replicas, + selector: None, + } + } +} From 02907042be9ddd1744aad8a238a377c58ef8d02b Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Thu, 8 Aug 2024 14:02:55 -0700 Subject: [PATCH 08/23] chore: update the proto files (#1910) Signed-off-by: Vigith Maurice --- hack/boilerplate/boilerplate.proto.txt | 16 ++++++++++++++++ pkg/apis/proto/map/v1/map.proto | 3 ++- pkg/apis/proto/mapstream/v1/mapstream.proto | 4 +++- pkg/apis/proto/reduce/v1/reduce.proto | 4 +++- .../proto/sessionreduce/v1/sessionreduce.proto | 3 ++- pkg/apis/proto/sideinput/v1/sideinput.proto | 2 +- pkg/apis/proto/sink/v1/sink.proto | 17 ++++++++++++++--- pkg/apis/proto/source/v1/source.proto | 6 +++++- 8 files changed, 46 insertions(+), 9 deletions(-) create mode 100644 hack/boilerplate/boilerplate.proto.txt diff --git a/hack/boilerplate/boilerplate.proto.txt b/hack/boilerplate/boilerplate.proto.txt new file mode 
100644 index 0000000000..bd4685355c --- /dev/null +++ b/hack/boilerplate/boilerplate.proto.txt @@ -0,0 +1,16 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + diff --git a/pkg/apis/proto/map/v1/map.proto b/pkg/apis/proto/map/v1/map.proto index 48064b0f72..a7fd7d4ebd 100644 --- a/pkg/apis/proto/map/v1/map.proto +++ b/pkg/apis/proto/map/v1/map.proto @@ -40,6 +40,7 @@ message MapRequest { bytes value = 2; google.protobuf.Timestamp event_time = 3; google.protobuf.Timestamp watermark = 4; + map headers = 5; } /** @@ -59,4 +60,4 @@ message MapResponse { */ message ReadyResponse { bool ready = 1; -} \ No newline at end of file +} diff --git a/pkg/apis/proto/mapstream/v1/mapstream.proto b/pkg/apis/proto/mapstream/v1/mapstream.proto index 61693adba6..cb5cd47f0b 100644 --- a/pkg/apis/proto/mapstream/v1/mapstream.proto +++ b/pkg/apis/proto/mapstream/v1/mapstream.proto @@ -22,6 +22,7 @@ option java_package = "io.numaproj.numaflow.mapstream.v1"; import "google/protobuf/empty.proto"; import "google/protobuf/timestamp.proto"; + package mapstream.v1; service MapStream { @@ -40,6 +41,7 @@ message MapStreamRequest { bytes value = 2; google.protobuf.Timestamp event_time = 3; google.protobuf.Timestamp watermark = 4; + map headers = 5; } /** @@ -59,4 +61,4 @@ message MapStreamResponse { */ message ReadyResponse { bool ready = 1; -} \ No newline at end of file +} diff --git a/pkg/apis/proto/reduce/v1/reduce.proto b/pkg/apis/proto/reduce/v1/reduce.proto index 
07b934f587..2e2098543b 100644 --- a/pkg/apis/proto/reduce/v1/reduce.proto +++ b/pkg/apis/proto/reduce/v1/reduce.proto @@ -22,6 +22,7 @@ option java_package = "io.numaproj.numaflow.reduce.v1"; import "google/protobuf/empty.proto"; import "google/protobuf/timestamp.proto"; + package reduce.v1; service Reduce { @@ -55,6 +56,7 @@ message ReduceRequest { bytes value = 2; google.protobuf.Timestamp event_time = 3; google.protobuf.Timestamp watermark = 4; + map headers = 5; } Payload payload = 1; @@ -94,4 +96,4 @@ message ReduceResponse { */ message ReadyResponse { bool ready = 1; -} \ No newline at end of file +} diff --git a/pkg/apis/proto/sessionreduce/v1/sessionreduce.proto b/pkg/apis/proto/sessionreduce/v1/sessionreduce.proto index d3c0100371..994f401097 100644 --- a/pkg/apis/proto/sessionreduce/v1/sessionreduce.proto +++ b/pkg/apis/proto/sessionreduce/v1/sessionreduce.proto @@ -67,6 +67,7 @@ message SessionReduceRequest { bytes value = 2; google.protobuf.Timestamp event_time = 3; google.protobuf.Timestamp watermark = 4; + map headers = 5; } Payload payload = 1; @@ -98,4 +99,4 @@ message SessionReduceResponse { */ message ReadyResponse { bool ready = 1; -} \ No newline at end of file +} diff --git a/pkg/apis/proto/sideinput/v1/sideinput.proto b/pkg/apis/proto/sideinput/v1/sideinput.proto index d3da45b94a..ab56a67fa9 100644 --- a/pkg/apis/proto/sideinput/v1/sideinput.proto +++ b/pkg/apis/proto/sideinput/v1/sideinput.proto @@ -56,4 +56,4 @@ message SideInputResponse { */ message ReadyResponse { bool ready = 1; -} \ No newline at end of file +} diff --git a/pkg/apis/proto/sink/v1/sink.proto b/pkg/apis/proto/sink/v1/sink.proto index a62760f1ea..caadaf66b6 100644 --- a/pkg/apis/proto/sink/v1/sink.proto +++ b/pkg/apis/proto/sink/v1/sink.proto @@ -22,6 +22,7 @@ option java_package = "io.numaproj.numaflow.sink.v1"; import "google/protobuf/empty.proto"; import "google/protobuf/timestamp.proto"; + package sink.v1; service Sink { @@ -41,6 +42,7 @@ message SinkRequest { 
google.protobuf.Timestamp event_time = 3; google.protobuf.Timestamp watermark = 4; string id = 5; + map headers = 6; } /** @@ -50,6 +52,15 @@ message ReadyResponse { bool ready = 1; } +/* + * Status is the status of the response. + */ +enum Status { + SUCCESS = 0; + FAILURE = 1; + FALLBACK = 2; +} + /** * SinkResponse is the individual response of each message written to the sink. */ @@ -57,10 +68,10 @@ message SinkResponse { message Result { // id is the ID of the message, can be used to uniquely identify the message. string id = 1; - // success denotes the status of persisting to disk. if set to false, it means writing to sink for the message failed. - bool success = 2; + // status denotes the status of persisting to sink. It can be SUCCESS, FAILURE, or FALLBACK. + Status status = 2; // err_msg is the error message, set it if success is set to false. string err_msg = 3; } repeated Result results = 1; -} \ No newline at end of file +} diff --git a/pkg/apis/proto/source/v1/source.proto b/pkg/apis/proto/source/v1/source.proto index 7520b98c5a..0fd0bdbb37 100644 --- a/pkg/apis/proto/source/v1/source.proto +++ b/pkg/apis/proto/source/v1/source.proto @@ -79,6 +79,10 @@ message ReadResponse { // We add this optional field to support the use case where the user defined source can provide keys for the datum. // e.g. Kafka and Redis Stream message usually include information about the keys. repeated string keys = 4; + // Optional list of headers associated with the datum. + // Headers are the metadata associated with the datum. + // e.g. Kafka and Redis Stream message usually include information about the headers. + map headers = 5; } // Required field holding the result. Result result = 1; @@ -165,4 +169,4 @@ message Offset { // It is useful for sources that have multiple partitions. e.g. Kafka. // If the partition_id is not specified, it is assumed that the source has a single partition. 
int32 partition_id = 2; -} \ No newline at end of file +} From 032dffdfa4b8b5895a4b2947a999aa293067c551 Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Thu, 8 Aug 2024 22:10:12 -0700 Subject: [PATCH 09/23] chore: rust codegen templates (#1913) Signed-off-by: Derek Wang --- serving/numaflow-models/Cargo.toml | 7 +-- serving/numaflow-models/Makefile | 6 +- .../numaflow-models/src/apis/configuration.rs | 24 ++++--- .../src/models/abstract_pod_template.rs | 24 ++++--- .../src/models/abstract_sink.rs | 24 ++++--- .../src/models/abstract_vertex.rs | 24 ++++--- .../src/models/authorization.rs | 24 ++++--- .../numaflow-models/src/models/basic_auth.rs | 24 ++++--- .../numaflow-models/src/models/blackhole.rs | 24 ++++--- .../src/models/buffer_service_config.rs | 24 ++++--- .../src/models/combined_edge.rs | 24 ++++--- .../numaflow-models/src/models/container.rs | 24 ++++--- .../src/models/container_builder.rs | 24 ++++--- .../src/models/container_template.rs | 24 ++++--- .../src/models/daemon_template.rs | 24 ++++--- serving/numaflow-models/src/models/edge.rs | 24 ++++--- .../src/models/fixed_window.rs | 24 ++++--- .../src/models/forward_conditions.rs | 24 ++++--- .../numaflow-models/src/models/function.rs | 24 ++++--- .../src/models/generator_source.rs | 24 ++++--- .../src/models/get_container_req.rs | 24 ++++--- .../src/models/get_daemon_deployment_req.rs | 24 ++++--- .../models/get_jet_stream_service_spec_req.rs | 24 ++++--- .../get_jet_stream_stateful_set_spec_req.rs | 24 ++++--- .../get_mono_vertex_daemon_deployment_req.rs | 24 ++++--- .../models/get_mono_vertex_pod_spec_req.rs | 24 ++++--- .../src/models/get_redis_service_spec_req.rs | 24 ++++--- .../models/get_redis_stateful_set_spec_req.rs | 24 ++++--- .../models/get_side_input_deployment_req.rs | 24 ++++--- .../src/models/get_vertex_pod_spec_req.rs | 24 ++++--- .../numaflow-models/src/models/group_by.rs | 24 ++++--- serving/numaflow-models/src/models/gssapi.rs | 24 ++++--- 
.../numaflow-models/src/models/http_source.rs | 24 ++++--- .../numaflow-models/src/models/idle_source.rs | 24 ++++--- .../src/models/inter_step_buffer_service.rs | 24 ++++--- .../models/inter_step_buffer_service_list.rs | 24 ++++--- .../models/inter_step_buffer_service_spec.rs | 24 ++++--- .../inter_step_buffer_service_status.rs | 24 ++++--- .../src/models/jet_stream_buffer_service.rs | 24 ++++--- .../src/models/jet_stream_config.rs | 24 ++++--- .../src/models/jet_stream_source.rs | 24 ++++--- .../src/models/job_template.rs | 24 ++++--- .../numaflow-models/src/models/kafka_sink.rs | 24 ++++--- .../src/models/kafka_source.rs | 24 ++++--- .../numaflow-models/src/models/lifecycle.rs | 24 ++++--- serving/numaflow-models/src/models/log.rs | 24 ++++--- .../numaflow-models/src/models/metadata.rs | 24 ++++--- .../numaflow-models/src/models/mono_vertex.rs | 24 ++++--- .../src/models/mono_vertex_limits.rs | 24 ++++--- .../src/models/mono_vertex_list.rs | 24 ++++--- .../src/models/mono_vertex_spec.rs | 24 ++++--- .../src/models/mono_vertex_status.rs | 24 ++++--- .../src/models/native_redis.rs | 24 ++++--- .../numaflow-models/src/models/nats_auth.rs | 24 ++++--- .../numaflow-models/src/models/nats_source.rs | 24 ++++--- .../numaflow-models/src/models/no_store.rs | 24 ++++--- .../numaflow-models/src/models/pbq_storage.rs | 24 ++++--- .../src/models/persistence_strategy.rs | 24 ++++--- .../numaflow-models/src/models/pipeline.rs | 24 ++++--- .../src/models/pipeline_limits.rs | 24 ++++--- .../src/models/pipeline_list.rs | 24 ++++--- .../src/models/pipeline_spec.rs | 24 ++++--- .../src/models/pipeline_status.rs | 24 ++++--- .../src/models/redis_buffer_service.rs | 24 ++++--- .../src/models/redis_config.rs | 24 ++++--- .../src/models/redis_settings.rs | 24 ++++--- serving/numaflow-models/src/models/sasl.rs | 24 ++++--- .../numaflow-models/src/models/sasl_plain.rs | 24 ++++--- serving/numaflow-models/src/models/scale.rs | 24 ++++--- .../src/models/serving_source.rs | 24 ++++--- 
.../src/models/serving_store.rs | 24 ++++--- .../src/models/session_window.rs | 24 ++++--- .../numaflow-models/src/models/side_input.rs | 24 ++++--- .../src/models/side_input_trigger.rs | 24 ++++--- .../models/side_inputs_manager_template.rs | 24 ++++--- serving/numaflow-models/src/models/sink.rs | 24 ++++--- .../src/models/sliding_window.rs | 24 ++++--- serving/numaflow-models/src/models/source.rs | 24 ++++--- serving/numaflow-models/src/models/status.rs | 24 ++++--- .../src/models/tag_conditions.rs | 24 ++++--- .../numaflow-models/src/models/templates.rs | 24 ++++--- serving/numaflow-models/src/models/tls.rs | 24 ++++--- .../numaflow-models/src/models/transformer.rs | 24 ++++--- serving/numaflow-models/src/models/ud_sink.rs | 24 ++++--- .../numaflow-models/src/models/ud_source.rs | 24 ++++--- .../src/models/ud_transformer.rs | 24 ++++--- serving/numaflow-models/src/models/udf.rs | 24 ++++--- serving/numaflow-models/src/models/vertex.rs | 24 ++++--- .../src/models/vertex_instance.rs | 24 ++++--- .../src/models/vertex_limits.rs | 24 ++++--- .../numaflow-models/src/models/vertex_list.rs | 24 ++++--- .../numaflow-models/src/models/vertex_spec.rs | 24 ++++--- .../src/models/vertex_status.rs | 24 ++++--- .../src/models/vertex_template.rs | 24 ++++--- .../numaflow-models/src/models/watermark.rs | 24 ++++--- serving/numaflow-models/src/models/window.rs | 24 ++++--- .../numaflow-models/templates/Cargo.mustache | 63 +++++++++++++++++++ .../templates/partial_header.mustache | 17 +++++ 98 files changed, 1590 insertions(+), 759 deletions(-) create mode 100644 serving/numaflow-models/templates/Cargo.mustache create mode 100644 serving/numaflow-models/templates/partial_header.mustache diff --git a/serving/numaflow-models/Cargo.toml b/serving/numaflow-models/Cargo.toml index 1585b7ee0e..6a38e0cc01 100644 --- a/serving/numaflow-models/Cargo.toml +++ b/serving/numaflow-models/Cargo.toml @@ -1,10 +1,9 @@ [package] name = "numaflow-models" version = "0.0.0-pre" -authors = ["Numaflow 
Developers"] -description = "No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)" -# Override this license by providing a License Object in the OpenAPI. -license = "Apache License 2.0" +authors = ["The Numaproj Authors"] +description = "Numaflow models" +license = "Apache-2.0 license" edition = "2021" [dependencies] diff --git a/serving/numaflow-models/Makefile b/serving/numaflow-models/Makefile index ab2155f3a1..5ff0bf7e53 100644 --- a/serving/numaflow-models/Makefile +++ b/serving/numaflow-models/Makefile @@ -7,13 +7,13 @@ SDK_VERSION := $(shell if [[ "$(VERSION)" =~ ^v[0-9]+\.[0-9]+\.[0-9]+.* ]]; the # Somehow type-mappings stopped working starting from v7.4.0 GENERATOR_VERSION := v7.3.0 -DOCKER = docker run --rm -v `pwd -P`:/base --workdir /base +DOCKER = docker run --rm --user $(shell id -u):$(shell id -g) -v `pwd -P`:/base --workdir /base publish: generate echo TODO generate: - rm -Rf ./docs ./test ./numaflow/models/ ./numaflow/model/ + rm -Rf ./docs ./src mkdir -p ./dist cat ../../api/openapi-spec/swagger.json | ./hack/swaggerfilter.py io.numaproj.numaflow | \ sed 's/io.k8s.api.core.v1./CoreV1/' | \ @@ -25,6 +25,7 @@ generate: generate \ -i /base/dist/swagger.json \ -g rust \ + -t /base/templates \ -o /base \ --remove-operation-id-prefix \ --model-name-prefix '' \ @@ -58,7 +59,6 @@ generate: --type-mappings ResourceQuantity="k8s_openapi::apimachinery::pkg::api::resource::Quantity" \ --generate-alias-as-model - sed -e 's/edition = "2018"/edition = "2021"/g' -e 's/authors =.*/authors = \["Numaflow Developers"\]/' -e 's/license =.*/license = "Apache License 2.0"/' Cargo.toml > tmp && mv tmp Cargo.toml cargo add kube cargo add k8s-openapi --features v1_29 cargo fmt diff --git a/serving/numaflow-models/src/apis/configuration.rs b/serving/numaflow-models/src/apis/configuration.rs index da73e26e0b..0b61f68f34 100644 --- a/serving/numaflow-models/src/apis/configuration.rs +++ 
b/serving/numaflow-models/src/apis/configuration.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Debug, Clone)] pub struct Configuration { diff --git a/serving/numaflow-models/src/models/abstract_pod_template.rs b/serving/numaflow-models/src/models/abstract_pod_template.rs index 6de4149ab1..612ade86b6 100644 --- a/serving/numaflow-models/src/models/abstract_pod_template.rs +++ b/serving/numaflow-models/src/models/abstract_pod_template.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. /// AbstractPodTemplate : AbstractPodTemplate provides a template for pod customization in vertices, daemon deployments and so on. diff --git a/serving/numaflow-models/src/models/abstract_sink.rs b/serving/numaflow-models/src/models/abstract_sink.rs index ab8f2ba058..d6607a699b 100644 --- a/serving/numaflow-models/src/models/abstract_sink.rs +++ b/serving/numaflow-models/src/models/abstract_sink.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AbstractSink { diff --git a/serving/numaflow-models/src/models/abstract_vertex.rs b/serving/numaflow-models/src/models/abstract_vertex.rs index 62fc1240ce..a93bcc3ac0 100644 --- a/serving/numaflow-models/src/models/abstract_vertex.rs +++ b/serving/numaflow-models/src/models/abstract_vertex.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AbstractVertex { diff --git a/serving/numaflow-models/src/models/authorization.rs b/serving/numaflow-models/src/models/authorization.rs index f1242768d2..de97eb8c7c 100644 --- a/serving/numaflow-models/src/models/authorization.rs +++ b/serving/numaflow-models/src/models/authorization.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Authorization { diff --git a/serving/numaflow-models/src/models/basic_auth.rs b/serving/numaflow-models/src/models/basic_auth.rs index 1b5eea25d8..2ded8f7b04 100644 --- a/serving/numaflow-models/src/models/basic_auth.rs +++ b/serving/numaflow-models/src/models/basic_auth.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. /// BasicAuth : BasicAuth represents the basic authentication approach which contains a user name and a password. 
diff --git a/serving/numaflow-models/src/models/blackhole.rs b/serving/numaflow-models/src/models/blackhole.rs index 145871aa16..e685e38dad 100644 --- a/serving/numaflow-models/src/models/blackhole.rs +++ b/serving/numaflow-models/src/models/blackhole.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. /// Blackhole : Blackhole is a sink to emulate /dev/null diff --git a/serving/numaflow-models/src/models/buffer_service_config.rs b/serving/numaflow-models/src/models/buffer_service_config.rs index 24e621c1ea..4436b3ae20 100644 --- a/serving/numaflow-models/src/models/buffer_service_config.rs +++ b/serving/numaflow-models/src/models/buffer_service_config.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BufferServiceConfig { diff --git a/serving/numaflow-models/src/models/combined_edge.rs b/serving/numaflow-models/src/models/combined_edge.rs index 94d478727c..3350ed8100 100644 --- a/serving/numaflow-models/src/models/combined_edge.rs +++ b/serving/numaflow-models/src/models/combined_edge.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. /// CombinedEdge : CombinedEdge is a combination of Edge and some other properties such as vertex type, partitions, limits. 
It's used to decorate the fromEdges and toEdges of the generated Vertex objects, so that in the vertex pod, it knows the properties of the connected vertices, for example, how many partitioned buffers I should write to, what is the write buffer length, etc. diff --git a/serving/numaflow-models/src/models/container.rs b/serving/numaflow-models/src/models/container.rs index 4d3d2730a0..69f3206a00 100644 --- a/serving/numaflow-models/src/models/container.rs +++ b/serving/numaflow-models/src/models/container.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. /// Container : Container is used to define the container properties for user-defined functions, sinks, etc. 
diff --git a/serving/numaflow-models/src/models/container_builder.rs b/serving/numaflow-models/src/models/container_builder.rs index 35898065d5..7948ae73f5 100644 --- a/serving/numaflow-models/src/models/container_builder.rs +++ b/serving/numaflow-models/src/models/container_builder.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ContainerBuilder { diff --git a/serving/numaflow-models/src/models/container_template.rs b/serving/numaflow-models/src/models/container_template.rs index 4c85b7a7f6..71bd4e068c 100644 --- a/serving/numaflow-models/src/models/container_template.rs +++ b/serving/numaflow-models/src/models/container_template.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. /// ContainerTemplate : ContainerTemplate defines customized spec for a container diff --git a/serving/numaflow-models/src/models/daemon_template.rs b/serving/numaflow-models/src/models/daemon_template.rs index 97290e6fac..5d03240b86 100644 --- a/serving/numaflow-models/src/models/daemon_template.rs +++ b/serving/numaflow-models/src/models/daemon_template.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DaemonTemplate { diff --git a/serving/numaflow-models/src/models/edge.rs b/serving/numaflow-models/src/models/edge.rs index 95a3b9742b..ed16372658 100644 --- a/serving/numaflow-models/src/models/edge.rs +++ b/serving/numaflow-models/src/models/edge.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Edge { diff --git a/serving/numaflow-models/src/models/fixed_window.rs b/serving/numaflow-models/src/models/fixed_window.rs index 76a3a9dc1c..1095cfc15a 100644 --- a/serving/numaflow-models/src/models/fixed_window.rs +++ b/serving/numaflow-models/src/models/fixed_window.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. /// FixedWindow : FixedWindow describes a fixed window diff --git a/serving/numaflow-models/src/models/forward_conditions.rs b/serving/numaflow-models/src/models/forward_conditions.rs index ea99358902..383c605034 100644 --- a/serving/numaflow-models/src/models/forward_conditions.rs +++ b/serving/numaflow-models/src/models/forward_conditions.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ForwardConditions { diff --git a/serving/numaflow-models/src/models/function.rs b/serving/numaflow-models/src/models/function.rs index 8081c827fe..820ef68e29 100644 --- a/serving/numaflow-models/src/models/function.rs +++ b/serving/numaflow-models/src/models/function.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Function { diff --git a/serving/numaflow-models/src/models/generator_source.rs b/serving/numaflow-models/src/models/generator_source.rs index 268652d0e3..063f146e15 100644 --- a/serving/numaflow-models/src/models/generator_source.rs +++ b/serving/numaflow-models/src/models/generator_source.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GeneratorSource { diff --git a/serving/numaflow-models/src/models/get_container_req.rs b/serving/numaflow-models/src/models/get_container_req.rs index 2d97d73f1b..2214ca06bd 100644 --- a/serving/numaflow-models/src/models/get_container_req.rs +++ b/serving/numaflow-models/src/models/get_container_req.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GetContainerReq { diff --git a/serving/numaflow-models/src/models/get_daemon_deployment_req.rs b/serving/numaflow-models/src/models/get_daemon_deployment_req.rs index 2c20b6eba2..32cfa45d09 100644 --- a/serving/numaflow-models/src/models/get_daemon_deployment_req.rs +++ b/serving/numaflow-models/src/models/get_daemon_deployment_req.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GetDaemonDeploymentReq { diff --git a/serving/numaflow-models/src/models/get_jet_stream_service_spec_req.rs b/serving/numaflow-models/src/models/get_jet_stream_service_spec_req.rs index e6168e7d3e..6d0919e056 100644 --- a/serving/numaflow-models/src/models/get_jet_stream_service_spec_req.rs +++ b/serving/numaflow-models/src/models/get_jet_stream_service_spec_req.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GetJetStreamServiceSpecReq { diff --git a/serving/numaflow-models/src/models/get_jet_stream_stateful_set_spec_req.rs b/serving/numaflow-models/src/models/get_jet_stream_stateful_set_spec_req.rs index ffa11c5bbe..1815ca0302 100644 --- a/serving/numaflow-models/src/models/get_jet_stream_stateful_set_spec_req.rs +++ b/serving/numaflow-models/src/models/get_jet_stream_stateful_set_spec_req.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GetJetStreamStatefulSetSpecReq { diff --git a/serving/numaflow-models/src/models/get_mono_vertex_daemon_deployment_req.rs b/serving/numaflow-models/src/models/get_mono_vertex_daemon_deployment_req.rs index 6888bcf9cb..bbe888c850 100644 --- a/serving/numaflow-models/src/models/get_mono_vertex_daemon_deployment_req.rs +++ b/serving/numaflow-models/src/models/get_mono_vertex_daemon_deployment_req.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GetMonoVertexDaemonDeploymentReq { diff --git a/serving/numaflow-models/src/models/get_mono_vertex_pod_spec_req.rs b/serving/numaflow-models/src/models/get_mono_vertex_pod_spec_req.rs index 18ba8d4ea5..e8ae0406a6 100644 --- a/serving/numaflow-models/src/models/get_mono_vertex_pod_spec_req.rs +++ b/serving/numaflow-models/src/models/get_mono_vertex_pod_spec_req.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GetMonoVertexPodSpecReq { diff --git a/serving/numaflow-models/src/models/get_redis_service_spec_req.rs b/serving/numaflow-models/src/models/get_redis_service_spec_req.rs index df584ea71a..7a8843cc29 100644 --- a/serving/numaflow-models/src/models/get_redis_service_spec_req.rs +++ b/serving/numaflow-models/src/models/get_redis_service_spec_req.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GetRedisServiceSpecReq { diff --git a/serving/numaflow-models/src/models/get_redis_stateful_set_spec_req.rs b/serving/numaflow-models/src/models/get_redis_stateful_set_spec_req.rs index 81cc11b7c9..151f774178 100644 --- a/serving/numaflow-models/src/models/get_redis_stateful_set_spec_req.rs +++ b/serving/numaflow-models/src/models/get_redis_stateful_set_spec_req.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GetRedisStatefulSetSpecReq { diff --git a/serving/numaflow-models/src/models/get_side_input_deployment_req.rs b/serving/numaflow-models/src/models/get_side_input_deployment_req.rs index f548abcf4d..8efa9c5da9 100644 --- a/serving/numaflow-models/src/models/get_side_input_deployment_req.rs +++ b/serving/numaflow-models/src/models/get_side_input_deployment_req.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GetSideInputDeploymentReq { diff --git a/serving/numaflow-models/src/models/get_vertex_pod_spec_req.rs b/serving/numaflow-models/src/models/get_vertex_pod_spec_req.rs index bd976cf1f9..7bc58591ef 100644 --- a/serving/numaflow-models/src/models/get_vertex_pod_spec_req.rs +++ b/serving/numaflow-models/src/models/get_vertex_pod_spec_req.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GetVertexPodSpecReq { diff --git a/serving/numaflow-models/src/models/group_by.rs b/serving/numaflow-models/src/models/group_by.rs index 915e8a5bca..1d19702f6f 100644 --- a/serving/numaflow-models/src/models/group_by.rs +++ b/serving/numaflow-models/src/models/group_by.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. /// GroupBy : GroupBy indicates it is a reducer UDF diff --git a/serving/numaflow-models/src/models/gssapi.rs b/serving/numaflow-models/src/models/gssapi.rs index d040f83cb7..83292ff076 100644 --- a/serving/numaflow-models/src/models/gssapi.rs +++ b/serving/numaflow-models/src/models/gssapi.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
/// Gssapi : GSSAPI represents a SASL GSSAPI config diff --git a/serving/numaflow-models/src/models/http_source.rs b/serving/numaflow-models/src/models/http_source.rs index e5178c86e3..026c335834 100644 --- a/serving/numaflow-models/src/models/http_source.rs +++ b/serving/numaflow-models/src/models/http_source.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct HttpSource { diff --git a/serving/numaflow-models/src/models/idle_source.rs b/serving/numaflow-models/src/models/idle_source.rs index cbe629ae9f..3396ddda9f 100644 --- a/serving/numaflow-models/src/models/idle_source.rs +++ b/serving/numaflow-models/src/models/idle_source.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IdleSource { diff --git a/serving/numaflow-models/src/models/inter_step_buffer_service.rs b/serving/numaflow-models/src/models/inter_step_buffer_service.rs index 8e6a9e20f4..254c322987 100644 --- a/serving/numaflow-models/src/models/inter_step_buffer_service.rs +++ b/serving/numaflow-models/src/models/inter_step_buffer_service.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct InterStepBufferService { diff --git a/serving/numaflow-models/src/models/inter_step_buffer_service_list.rs b/serving/numaflow-models/src/models/inter_step_buffer_service_list.rs index 4ef7a04c49..cca0833165 100644 --- a/serving/numaflow-models/src/models/inter_step_buffer_service_list.rs +++ b/serving/numaflow-models/src/models/inter_step_buffer_service_list.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
/// InterStepBufferServiceList : InterStepBufferServiceList is the list of InterStepBufferService resources diff --git a/serving/numaflow-models/src/models/inter_step_buffer_service_spec.rs b/serving/numaflow-models/src/models/inter_step_buffer_service_spec.rs index 2de93da314..6302564a7e 100644 --- a/serving/numaflow-models/src/models/inter_step_buffer_service_spec.rs +++ b/serving/numaflow-models/src/models/inter_step_buffer_service_spec.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct InterStepBufferServiceSpec { diff --git a/serving/numaflow-models/src/models/inter_step_buffer_service_status.rs b/serving/numaflow-models/src/models/inter_step_buffer_service_status.rs index 43c7cd16cf..655d31c161 100644 --- a/serving/numaflow-models/src/models/inter_step_buffer_service_status.rs +++ b/serving/numaflow-models/src/models/inter_step_buffer_service_status.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct InterStepBufferServiceStatus { diff --git a/serving/numaflow-models/src/models/jet_stream_buffer_service.rs b/serving/numaflow-models/src/models/jet_stream_buffer_service.rs index de5392a051..96dce5855c 100644 --- a/serving/numaflow-models/src/models/jet_stream_buffer_service.rs +++ b/serving/numaflow-models/src/models/jet_stream_buffer_service.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct JetStreamBufferService { diff --git a/serving/numaflow-models/src/models/jet_stream_config.rs b/serving/numaflow-models/src/models/jet_stream_config.rs index d62ceab186..f00c958499 100644 --- a/serving/numaflow-models/src/models/jet_stream_config.rs +++ b/serving/numaflow-models/src/models/jet_stream_config.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct JetStreamConfig { diff --git a/serving/numaflow-models/src/models/jet_stream_source.rs b/serving/numaflow-models/src/models/jet_stream_source.rs index c1040259e3..4c6ad1f69a 100644 --- a/serving/numaflow-models/src/models/jet_stream_source.rs +++ b/serving/numaflow-models/src/models/jet_stream_source.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct JetStreamSource { diff --git a/serving/numaflow-models/src/models/job_template.rs b/serving/numaflow-models/src/models/job_template.rs index 9ffe8e162e..6c273d4db0 100644 --- a/serving/numaflow-models/src/models/job_template.rs +++ b/serving/numaflow-models/src/models/job_template.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct JobTemplate { diff --git a/serving/numaflow-models/src/models/kafka_sink.rs b/serving/numaflow-models/src/models/kafka_sink.rs index 99dabd7a4e..c58d41025e 100644 --- a/serving/numaflow-models/src/models/kafka_sink.rs +++ b/serving/numaflow-models/src/models/kafka_sink.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct KafkaSink { diff --git a/serving/numaflow-models/src/models/kafka_source.rs b/serving/numaflow-models/src/models/kafka_source.rs index b0771d8e33..acdd1eee98 100644 --- a/serving/numaflow-models/src/models/kafka_source.rs +++ b/serving/numaflow-models/src/models/kafka_source.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct KafkaSource { diff --git a/serving/numaflow-models/src/models/lifecycle.rs b/serving/numaflow-models/src/models/lifecycle.rs index 75065cf816..9223e0c377 100644 --- a/serving/numaflow-models/src/models/lifecycle.rs +++ b/serving/numaflow-models/src/models/lifecycle.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Lifecycle { diff --git a/serving/numaflow-models/src/models/log.rs b/serving/numaflow-models/src/models/log.rs index c1452a70ff..0e06511a5d 100644 --- a/serving/numaflow-models/src/models/log.rs +++ b/serving/numaflow-models/src/models/log.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Log {} diff --git a/serving/numaflow-models/src/models/metadata.rs b/serving/numaflow-models/src/models/metadata.rs index 56c214a01e..c0f8eb1efa 100644 --- a/serving/numaflow-models/src/models/metadata.rs +++ b/serving/numaflow-models/src/models/metadata.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Metadata { diff --git a/serving/numaflow-models/src/models/mono_vertex.rs b/serving/numaflow-models/src/models/mono_vertex.rs index 3d66a61036..b3411fe686 100644 --- a/serving/numaflow-models/src/models/mono_vertex.rs +++ b/serving/numaflow-models/src/models/mono_vertex.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MonoVertex { diff --git a/serving/numaflow-models/src/models/mono_vertex_limits.rs b/serving/numaflow-models/src/models/mono_vertex_limits.rs index 0a8814f50f..03731ca1c9 100644 --- a/serving/numaflow-models/src/models/mono_vertex_limits.rs +++ b/serving/numaflow-models/src/models/mono_vertex_limits.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MonoVertexLimits { diff --git a/serving/numaflow-models/src/models/mono_vertex_list.rs b/serving/numaflow-models/src/models/mono_vertex_list.rs index 78bb9414f9..5c67642586 100644 --- a/serving/numaflow-models/src/models/mono_vertex_list.rs +++ b/serving/numaflow-models/src/models/mono_vertex_list.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MonoVertexList { diff --git a/serving/numaflow-models/src/models/mono_vertex_spec.rs b/serving/numaflow-models/src/models/mono_vertex_spec.rs index 7890717058..1041fbafb7 100644 --- a/serving/numaflow-models/src/models/mono_vertex_spec.rs +++ b/serving/numaflow-models/src/models/mono_vertex_spec.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MonoVertexSpec { diff --git a/serving/numaflow-models/src/models/mono_vertex_status.rs b/serving/numaflow-models/src/models/mono_vertex_status.rs index 53695fd13a..86c0cb489f 100644 --- a/serving/numaflow-models/src/models/mono_vertex_status.rs +++ b/serving/numaflow-models/src/models/mono_vertex_status.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MonoVertexStatus { diff --git a/serving/numaflow-models/src/models/native_redis.rs b/serving/numaflow-models/src/models/native_redis.rs index a22fb37436..3200cd7731 100644 --- a/serving/numaflow-models/src/models/native_redis.rs +++ b/serving/numaflow-models/src/models/native_redis.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct NativeRedis { diff --git a/serving/numaflow-models/src/models/nats_auth.rs b/serving/numaflow-models/src/models/nats_auth.rs index 7b085650f9..ecf40749fd 100644 --- a/serving/numaflow-models/src/models/nats_auth.rs +++ b/serving/numaflow-models/src/models/nats_auth.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. /// NatsAuth : NatsAuth defines how to authenticate the nats access diff --git a/serving/numaflow-models/src/models/nats_source.rs b/serving/numaflow-models/src/models/nats_source.rs index 666fcbb884..97b3bae591 100644 --- a/serving/numaflow-models/src/models/nats_source.rs +++ b/serving/numaflow-models/src/models/nats_source.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct NatsSource { diff --git a/serving/numaflow-models/src/models/no_store.rs b/serving/numaflow-models/src/models/no_store.rs index f91247af2f..e185d59af3 100644 --- a/serving/numaflow-models/src/models/no_store.rs +++ b/serving/numaflow-models/src/models/no_store.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. /// NoStore : NoStore means there will be no persistence storage and there will be data loss during pod restarts. Use this option only if you do not care about correctness (e.g., approx statistics pipeline like sampling rate, etc.). 
diff --git a/serving/numaflow-models/src/models/pbq_storage.rs b/serving/numaflow-models/src/models/pbq_storage.rs index c01ffe9eba..e84d765c08 100644 --- a/serving/numaflow-models/src/models/pbq_storage.rs +++ b/serving/numaflow-models/src/models/pbq_storage.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. /// PbqStorage : PBQStorage defines the persistence configuration for a vertex. diff --git a/serving/numaflow-models/src/models/persistence_strategy.rs b/serving/numaflow-models/src/models/persistence_strategy.rs index 3541e83b91..d558da3ee8 100644 --- a/serving/numaflow-models/src/models/persistence_strategy.rs +++ b/serving/numaflow-models/src/models/persistence_strategy.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. /// PersistenceStrategy : PersistenceStrategy defines the strategy of persistence diff --git a/serving/numaflow-models/src/models/pipeline.rs b/serving/numaflow-models/src/models/pipeline.rs index 52db105f24..e7d3640661 100644 --- a/serving/numaflow-models/src/models/pipeline.rs +++ b/serving/numaflow-models/src/models/pipeline.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Pipeline { diff --git a/serving/numaflow-models/src/models/pipeline_limits.rs b/serving/numaflow-models/src/models/pipeline_limits.rs index c4a158e6ae..7d2d8717db 100644 --- a/serving/numaflow-models/src/models/pipeline_limits.rs +++ b/serving/numaflow-models/src/models/pipeline_limits.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PipelineLimits { diff --git a/serving/numaflow-models/src/models/pipeline_list.rs b/serving/numaflow-models/src/models/pipeline_list.rs index 7fc027c1a7..66b800f6c0 100644 --- a/serving/numaflow-models/src/models/pipeline_list.rs +++ b/serving/numaflow-models/src/models/pipeline_list.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PipelineList { diff --git a/serving/numaflow-models/src/models/pipeline_spec.rs b/serving/numaflow-models/src/models/pipeline_spec.rs index c33cdb1f80..884521cadd 100644 --- a/serving/numaflow-models/src/models/pipeline_spec.rs +++ b/serving/numaflow-models/src/models/pipeline_spec.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PipelineSpec { diff --git a/serving/numaflow-models/src/models/pipeline_status.rs b/serving/numaflow-models/src/models/pipeline_status.rs index 8756844a7c..6e061bbdbe 100644 --- a/serving/numaflow-models/src/models/pipeline_status.rs +++ b/serving/numaflow-models/src/models/pipeline_status.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PipelineStatus { diff --git a/serving/numaflow-models/src/models/redis_buffer_service.rs b/serving/numaflow-models/src/models/redis_buffer_service.rs index 5dc527b391..c69a826ffb 100644 --- a/serving/numaflow-models/src/models/redis_buffer_service.rs +++ b/serving/numaflow-models/src/models/redis_buffer_service.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RedisBufferService { diff --git a/serving/numaflow-models/src/models/redis_config.rs b/serving/numaflow-models/src/models/redis_config.rs index cf563ab9ec..90c6a9e917 100644 --- a/serving/numaflow-models/src/models/redis_config.rs +++ b/serving/numaflow-models/src/models/redis_config.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RedisConfig { diff --git a/serving/numaflow-models/src/models/redis_settings.rs b/serving/numaflow-models/src/models/redis_settings.rs index f5b4b4c2b7..c91cc07088 100644 --- a/serving/numaflow-models/src/models/redis_settings.rs +++ b/serving/numaflow-models/src/models/redis_settings.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RedisSettings { diff --git a/serving/numaflow-models/src/models/sasl.rs b/serving/numaflow-models/src/models/sasl.rs index 2f56e8cce0..a2941f2e93 100644 --- a/serving/numaflow-models/src/models/sasl.rs +++ b/serving/numaflow-models/src/models/sasl.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Sasl { diff --git a/serving/numaflow-models/src/models/sasl_plain.rs b/serving/numaflow-models/src/models/sasl_plain.rs index 7719720dd3..135def0519 100644 --- a/serving/numaflow-models/src/models/sasl_plain.rs +++ b/serving/numaflow-models/src/models/sasl_plain.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SaslPlain { diff --git a/serving/numaflow-models/src/models/scale.rs b/serving/numaflow-models/src/models/scale.rs index 03673cca73..5037968d7c 100644 --- a/serving/numaflow-models/src/models/scale.rs +++ b/serving/numaflow-models/src/models/scale.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. /// Scale : Scale defines the parameters for autoscaling. diff --git a/serving/numaflow-models/src/models/serving_source.rs b/serving/numaflow-models/src/models/serving_source.rs index 25703a7031..8d70090694 100644 --- a/serving/numaflow-models/src/models/serving_source.rs +++ b/serving/numaflow-models/src/models/serving_source.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. /// ServingSource : ServingSource is the HTTP endpoint for Numaflow. diff --git a/serving/numaflow-models/src/models/serving_store.rs b/serving/numaflow-models/src/models/serving_store.rs index 7a3086aea9..591ae9214e 100644 --- a/serving/numaflow-models/src/models/serving_store.rs +++ b/serving/numaflow-models/src/models/serving_store.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. /// ServingStore : ServingStore to track and store data and metadata for tracking and serving. 
diff --git a/serving/numaflow-models/src/models/session_window.rs b/serving/numaflow-models/src/models/session_window.rs index 551f164cde..986d623f1c 100644 --- a/serving/numaflow-models/src/models/session_window.rs +++ b/serving/numaflow-models/src/models/session_window.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. /// SessionWindow : SessionWindow describes a session window diff --git a/serving/numaflow-models/src/models/side_input.rs b/serving/numaflow-models/src/models/side_input.rs index 275af684af..f388d21e84 100644 --- a/serving/numaflow-models/src/models/side_input.rs +++ b/serving/numaflow-models/src/models/side_input.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. /// SideInput : SideInput defines information of a Side Input diff --git a/serving/numaflow-models/src/models/side_input_trigger.rs b/serving/numaflow-models/src/models/side_input_trigger.rs index 497f5461b3..236dd487c6 100644 --- a/serving/numaflow-models/src/models/side_input_trigger.rs +++ b/serving/numaflow-models/src/models/side_input_trigger.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SideInputTrigger { diff --git a/serving/numaflow-models/src/models/side_inputs_manager_template.rs b/serving/numaflow-models/src/models/side_inputs_manager_template.rs index 82f7afe5d1..d1b4fa8cb4 100644 --- a/serving/numaflow-models/src/models/side_inputs_manager_template.rs +++ b/serving/numaflow-models/src/models/side_inputs_manager_template.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SideInputsManagerTemplate { diff --git a/serving/numaflow-models/src/models/sink.rs b/serving/numaflow-models/src/models/sink.rs index 6cd57bb2ec..aa3402f42e 100644 --- a/serving/numaflow-models/src/models/sink.rs +++ b/serving/numaflow-models/src/models/sink.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Sink { diff --git a/serving/numaflow-models/src/models/sliding_window.rs b/serving/numaflow-models/src/models/sliding_window.rs index 4d2f9a06c7..9cb6089c28 100644 --- a/serving/numaflow-models/src/models/sliding_window.rs +++ b/serving/numaflow-models/src/models/sliding_window.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
/// SlidingWindow : SlidingWindow describes a sliding window diff --git a/serving/numaflow-models/src/models/source.rs b/serving/numaflow-models/src/models/source.rs index b7331b4942..a5569b86fa 100644 --- a/serving/numaflow-models/src/models/source.rs +++ b/serving/numaflow-models/src/models/source.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Source { diff --git a/serving/numaflow-models/src/models/status.rs b/serving/numaflow-models/src/models/status.rs index cbfa91bf3d..c55ff4a245 100644 --- a/serving/numaflow-models/src/models/status.rs +++ b/serving/numaflow-models/src/models/status.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. /// Status : Status is a common structure which can be used for Status field. diff --git a/serving/numaflow-models/src/models/tag_conditions.rs b/serving/numaflow-models/src/models/tag_conditions.rs index 60d4b3ca58..3ffcff5976 100644 --- a/serving/numaflow-models/src/models/tag_conditions.rs +++ b/serving/numaflow-models/src/models/tag_conditions.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TagConditions { diff --git a/serving/numaflow-models/src/models/templates.rs b/serving/numaflow-models/src/models/templates.rs index 98876b49a8..205bcaeb84 100644 --- a/serving/numaflow-models/src/models/templates.rs +++ b/serving/numaflow-models/src/models/templates.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Templates { diff --git a/serving/numaflow-models/src/models/tls.rs b/serving/numaflow-models/src/models/tls.rs index b0c896e8ea..0834f6bc07 100644 --- a/serving/numaflow-models/src/models/tls.rs +++ b/serving/numaflow-models/src/models/tls.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Tls { diff --git a/serving/numaflow-models/src/models/transformer.rs b/serving/numaflow-models/src/models/transformer.rs index a17255f010..1df1d2fdef 100644 --- a/serving/numaflow-models/src/models/transformer.rs +++ b/serving/numaflow-models/src/models/transformer.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Transformer { diff --git a/serving/numaflow-models/src/models/ud_sink.rs b/serving/numaflow-models/src/models/ud_sink.rs index f247400f86..187dd6dabc 100644 --- a/serving/numaflow-models/src/models/ud_sink.rs +++ b/serving/numaflow-models/src/models/ud_sink.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct UdSink { diff --git a/serving/numaflow-models/src/models/ud_source.rs b/serving/numaflow-models/src/models/ud_source.rs index 65986169a6..9f1be42d46 100644 --- a/serving/numaflow-models/src/models/ud_source.rs +++ b/serving/numaflow-models/src/models/ud_source.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct UdSource { diff --git a/serving/numaflow-models/src/models/ud_transformer.rs b/serving/numaflow-models/src/models/ud_transformer.rs index ff698fab80..7a3c2ff7aa 100644 --- a/serving/numaflow-models/src/models/ud_transformer.rs +++ b/serving/numaflow-models/src/models/ud_transformer.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct UdTransformer { diff --git a/serving/numaflow-models/src/models/udf.rs b/serving/numaflow-models/src/models/udf.rs index cd1f6b520f..c45bd8e23d 100644 --- a/serving/numaflow-models/src/models/udf.rs +++ b/serving/numaflow-models/src/models/udf.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Udf { diff --git a/serving/numaflow-models/src/models/vertex.rs b/serving/numaflow-models/src/models/vertex.rs index 1f9cc0e617..5ed690fabd 100644 --- a/serving/numaflow-models/src/models/vertex.rs +++ b/serving/numaflow-models/src/models/vertex.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Vertex { diff --git a/serving/numaflow-models/src/models/vertex_instance.rs b/serving/numaflow-models/src/models/vertex_instance.rs index e7ba664e60..d2f8100415 100644 --- a/serving/numaflow-models/src/models/vertex_instance.rs +++ b/serving/numaflow-models/src/models/vertex_instance.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. /// VertexInstance : VertexInstance is a wrapper of a vertex instance, which contains the vertex spec and the instance information such as hostname and replica index. 
diff --git a/serving/numaflow-models/src/models/vertex_limits.rs b/serving/numaflow-models/src/models/vertex_limits.rs index 6f201e8de8..53a7ce096a 100644 --- a/serving/numaflow-models/src/models/vertex_limits.rs +++ b/serving/numaflow-models/src/models/vertex_limits.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct VertexLimits { diff --git a/serving/numaflow-models/src/models/vertex_list.rs b/serving/numaflow-models/src/models/vertex_list.rs index 68967d8aa5..091d047b07 100644 --- a/serving/numaflow-models/src/models/vertex_list.rs +++ b/serving/numaflow-models/src/models/vertex_list.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct VertexList { diff --git a/serving/numaflow-models/src/models/vertex_spec.rs b/serving/numaflow-models/src/models/vertex_spec.rs index a3fafd4882..078879d286 100644 --- a/serving/numaflow-models/src/models/vertex_spec.rs +++ b/serving/numaflow-models/src/models/vertex_spec.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct VertexSpec { diff --git a/serving/numaflow-models/src/models/vertex_status.rs b/serving/numaflow-models/src/models/vertex_status.rs index 6bcea1f106..b0c9cd0e9a 100644 --- a/serving/numaflow-models/src/models/vertex_status.rs +++ b/serving/numaflow-models/src/models/vertex_status.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct VertexStatus { diff --git a/serving/numaflow-models/src/models/vertex_template.rs b/serving/numaflow-models/src/models/vertex_template.rs index 34aae02b0e..85d743546d 100644 --- a/serving/numaflow-models/src/models/vertex_template.rs +++ b/serving/numaflow-models/src/models/vertex_template.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct VertexTemplate { diff --git a/serving/numaflow-models/src/models/watermark.rs b/serving/numaflow-models/src/models/watermark.rs index b54bf2a74b..76f044005a 100644 --- a/serving/numaflow-models/src/models/watermark.rs +++ b/serving/numaflow-models/src/models/watermark.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Watermark { diff --git a/serving/numaflow-models/src/models/window.rs b/serving/numaflow-models/src/models/window.rs index 28d4d42243..fa133174be 100644 --- a/serving/numaflow-models/src/models/window.rs +++ b/serving/numaflow-models/src/models/window.rs @@ -1,12 +1,20 @@ /* - * Numaflow - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: latest - * - * Generated by: https://openapi-generator.tech - */ +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
/// Window : Window describes windowing strategy diff --git a/serving/numaflow-models/templates/Cargo.mustache b/serving/numaflow-models/templates/Cargo.mustache new file mode 100644 index 0000000000..a4bbdfeae2 --- /dev/null +++ b/serving/numaflow-models/templates/Cargo.mustache @@ -0,0 +1,63 @@ +[package] +name = "{{{packageName}}}" +version = "{{#lambdaVersion}}{{{packageVersion}}}{{/lambdaVersion}}" +{{#infoEmail}} +authors = ["{{{.}}}"] +{{/infoEmail}} +{{^infoEmail}} +authors = ["The Numaproj Authors"] +{{/infoEmail}} +{{#appDescription}} +description = "Numaflow models" +{{/appDescription}} +license = "Apache-2.0 license" +edition = "2021" +{{#publishRustRegistry}} +publish = ["{{.}}"] +{{/publishRustRegistry}} +{{#repositoryUrl}} +repository = "{{.}}" +{{/repositoryUrl}} +{{#documentationUrl}} +documentation = "{{.}}" +{{/documentationUrl}} +{{#homePageUrl}} +homepage = "{{.}} +{{/homePageUrl}} + +[dependencies] +serde = "^1.0" +serde_derive = "^1.0" +{{#serdeWith}} +serde_with = "^2.0" +{{/serdeWith}} +serde_json = "^1.0" +url = "^2.2" +uuid = { version = "^1.0", features = ["serde", "v4"] } +{{#hyper}} +hyper = { version = "~0.14", features = ["full"] } +hyper-tls = "~0.5" +http = "~0.2" +base64 = "~0.7.0" +futures = "^0.3" +{{/hyper}} +{{#withAWSV4Signature}} +aws-sigv4 = "0.3.0" +http = "0.2.5" +secrecy = "0.8.0" +{{/withAWSV4Signature}} +{{#reqwest}} +{{^supportAsync}} +[dependencies.reqwest] +version = "^0.11" +features = ["json", "blocking", "multipart"] +{{/supportAsync}} +{{#supportAsync}} +{{#supportMiddleware}} +reqwest-middleware = "0.2.0" +{{/supportMiddleware}} +[dependencies.reqwest] +version = "^0.11" +features = ["json", "multipart"] +{{/supportAsync}} +{{/reqwest}} diff --git a/serving/numaflow-models/templates/partial_header.mustache b/serving/numaflow-models/templates/partial_header.mustache new file mode 100644 index 0000000000..db10e766d7 --- /dev/null +++ b/serving/numaflow-models/templates/partial_header.mustache @@ -0,0 +1,17 @@ +/* 
+Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. From 0df21eb98d224bd02f9f67eac1e3fb388d9da930 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 8 Aug 2024 23:54:56 -0700 Subject: [PATCH 10/23] docs: updated CHANGELOG.md (#1916) Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- CHANGELOG.md | 87 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 20631f34b5..3b0b83f0f4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,92 @@ # Changelog +## v1.3.0-rc1 (2024-08-08) + + * [179f5967](https://github.com/numaproj/numaflow/commit/179f59674a0a61eb7ae7cd7a83612f0eb7b3be7f) Update manifests to v1.3.0-rc1 + * [51cc125e](https://github.com/numaproj/numaflow/commit/51cc125eaa1f10cd896b0b5e6a7f9142659b179f) feat: introducing MonoVertex (#1911) + * [5e56a594](https://github.com/numaproj/numaflow/commit/5e56a594c8f6f23a4228ff0d740b6666e9f049a4) feat: Rust k8s model for Numaflow (#1898) + * [bc1451a3](https://github.com/numaproj/numaflow/commit/bc1451a35f3871c3459955956eaa35f48abea761) feat: Add ObservedGeneration field in vertex status and use it for calculating status (#1892) + * [280b9bd3](https://github.com/numaproj/numaflow/commit/280b9bd3edc95100791b2f23a1e3eb9930db675c) fix: configure discard policy for 
WorkQueue/Interest (#1884) + * [d2a67588](https://github.com/numaproj/numaflow/commit/d2a67588e3c9f165fab708a962c873beed235e95) feat: Sync the health status of ISBService, pipeline and vertex (#1860) + * [51a21fa8](https://github.com/numaproj/numaflow/commit/51a21fa8a4ba9c12ad3a14ddf6dab0ef53525d73) feat: expose replica metrics for ISB Service and Vertex (#1859) + * [e4e5f1c8](https://github.com/numaproj/numaflow/commit/e4e5f1c89bfa8913cff948dc7ae08ad828250cae) feat: new edge path (#1864) + * [c07425b5](https://github.com/numaproj/numaflow/commit/c07425b50b38f09a381c0673e9f344c5874aed7b) feat: pipeline and isbsvc resource health status metrics and detailed vertex type stats (#1856) + * [d708ffb0](https://github.com/numaproj/numaflow/commit/d708ffb0cb9c3f02e1be1fd3804ab9785b4fa2f1) feat: add controller and pipeline info metrics (#1855) + * [0b7f5a38](https://github.com/numaproj/numaflow/commit/0b7f5a3894fcceda5ff49a509c3a50a897b0a47d) fix: api docs for jetstream service (#1851) + * [1db0d093](https://github.com/numaproj/numaflow/commit/1db0d093b77d843911dc3a8890026b8e13bbf98a) feat: use same server-info file for all map modes (#1828) + * [ccfb8c2c](https://github.com/numaproj/numaflow/commit/ccfb8c2c0f804df3bc09933b4f9dfeba184b55f7) feat: Serving Source (#1806) + * [d620f1b1](https://github.com/numaproj/numaflow/commit/d620f1b16ec1bc9fd572c5106ed7093f82f417d2) feat: add ObservedGeneration to Pipeline and ISBService Status (#1799) + * [6e4a681f](https://github.com/numaproj/numaflow/commit/6e4a681ff260d95a08395ce5f70005c0c2c166d4) fix(#1832): scale down to >=min, but not 0 when there's direct back pressure (#1834) + * [fa18f97d](https://github.com/numaproj/numaflow/commit/fa18f97dc7fb9d611c3498b3cea2b2ecd8598b96) fix: should never scale down to < min (#1832) + * [cea0783f](https://github.com/numaproj/numaflow/commit/cea0783f660827daa2d36d413baab05114854a56) fix: value can be null (#1831) + * 
[251e84f2](https://github.com/numaproj/numaflow/commit/251e84f23eae3c54573bd87799055db750e50314) feat: enable restful daemon client option for UX server (#1826) + * [00619b67](https://github.com/numaproj/numaflow/commit/00619b671d636aac78fa2330cf334b014874fc18) feat: implement map batch (#1778) + * [8cff6d1f](https://github.com/numaproj/numaflow/commit/8cff6d1ff13bf4b3343084b737065fc03596a2f0) fix: save trait should accept Self as mutable (#1795) + * [2b0ac547](https://github.com/numaproj/numaflow/commit/2b0ac547e70209b87a3868684035265ca6dd619f) feat: crate for retry with backoff strategy (#1785) + * [5f3766ab](https://github.com/numaproj/numaflow/commit/5f3766ab9173d4b7a510c881f4d655089eebb504) feat: use protobuf to store wmb in KV (#1782) + * [21533393](https://github.com/numaproj/numaflow/commit/21533393125c8a2754f74f689ef6bf2f66c59830) feat: use protobuf to store header and messages in ISB (#1771) + * [07483c85](https://github.com/numaproj/numaflow/commit/07483c85540fb30f605a8d018ff4aa42ff7ac01d) fix: add retries when writing to redis and resp headers const (#1766) + * [1fc41e93](https://github.com/numaproj/numaflow/commit/1fc41e93991807cd4170474f9b3ec0b4dccb1a6a) feat: serving and tracking endpoint for Numaflow (#1765) + * [8da7c229](https://github.com/numaproj/numaflow/commit/8da7c2296b11d717cb5911256bd9d11af10b4ac1) feat: publish to callback endpoint for tracking (#1753) + * [f69d8303](https://github.com/numaproj/numaflow/commit/f69d8303268f79bdeaaa5603f44bbd38e0913e53) chore(deps): bump ws from 7.5.9 to 7.5.10 in /ui (#1762) + * [0f91c7ef](https://github.com/numaproj/numaflow/commit/0f91c7ef8e52819f096a23b1f2759b014952988e) chore(deps): bump braces from 3.0.2 to 3.0.3 in /ui (#1758) + * [b26008e8](https://github.com/numaproj/numaflow/commit/b26008e8414d99bda71ec23f5c1dd3770bf63972) feat(config): standardize boolean value with YAML tags. 
Fixes #1742 (#1749) + * [71bc030d](https://github.com/numaproj/numaflow/commit/71bc030dcea12ec97758b9023ba567744666c09a) feat: adding numaflow version to the UI (#1744) + * [1e03eee4](https://github.com/numaproj/numaflow/commit/1e03eee4f2ea123310db02d89775e20f85906d0f) fix: update SDKs to stable image (#1746) + * [30c42f63](https://github.com/numaproj/numaflow/commit/30c42f635c3d9840e1a3f5acdfed1d1a3d33e0be) doc: update roadmap (#1748) + * [61a17aaf](https://github.com/numaproj/numaflow/commit/61a17aafa82219ddb17b1b40a3c39e297c7f6584) Tick generator blob - Closes #1732 (#1733) + * [3759cde0](https://github.com/numaproj/numaflow/commit/3759cde082f273d2bc126fa2f463d47a4eda09bd) fix: Read from Oldest Offset for Idle Source Kafka e2e (#1731) + * [0cd57bc4](https://github.com/numaproj/numaflow/commit/0cd57bc4e6f54e3e1d756f59f67ec4720823f3a9) feat: Built-in Jetstream source implementation (Closes #1695) (#1723) + * [d2580c6d](https://github.com/numaproj/numaflow/commit/d2580c6d8bd5a83b56ca69cb0858af001f9b187b) fix: height fixes to render pipeline view (#1720) + * [3d9358f9](https://github.com/numaproj/numaflow/commit/3d9358f9c0670647bad76aa16ff5c8e188ff5a63) doc: add numaflow-controller-config link (#1719) + * [1c772576](https://github.com/numaproj/numaflow/commit/1c7725766ac1f4447536faf902d33c31983bda84) fix: summary bar overlay fix for plugin (#1710) + * [7ab5788d](https://github.com/numaproj/numaflow/commit/7ab5788d2478e6444b6212a761fda2793f417e4a) chore(deps): bump ejs from 3.1.9 to 3.1.10 in /ui (#1711) + * [88b89bc0](https://github.com/numaproj/numaflow/commit/88b89bc0f53268a687ae7c438d6c9f26dc10881c) doc: add "nav" for fallback-sink (#1694) + * [6da27960](https://github.com/numaproj/numaflow/commit/6da279605e29d55523449b3d4d5df0391961d66c) doc: reduce streaming (#1689) + * [aea4a329](https://github.com/numaproj/numaflow/commit/aea4a329be26ef50566b08d00b4bdd11a5a07e17) doc: Fallback Sink (#1691) + * 
[2f854b0d](https://github.com/numaproj/numaflow/commit/2f854b0d6d2b249393fe94eccdc0c2780b2f451d) chore(deps): bump golang.org/x/net from 0.22.0 to 0.23.0 (#1692) + * [9bfcf880](https://github.com/numaproj/numaflow/commit/9bfcf880f2890885cd3e527b632d27f584c7edef) doc: session doc (#1650) + * [68541358](https://github.com/numaproj/numaflow/commit/685413587e885213c3b036a8bcede198e04d0fa8) fix: version downgrade for monaco-editor (#1673) + * [db0d2ed1](https://github.com/numaproj/numaflow/commit/db0d2ed1d9208cbe52123b3927f49fdfb30442ba) feat: Fallback Sink (#1669) + * [06ca9bc4](https://github.com/numaproj/numaflow/commit/06ca9bc42ad1a88f6cbd4cbb4b76c01cc325e48c) fix: routing fixes (#1671) + * [3bb93820](https://github.com/numaproj/numaflow/commit/3bb938204de9c18925f1058a4ac3666498be64e9) feat: controller change for fallback sink (#1664) + * [268b00d1](https://github.com/numaproj/numaflow/commit/268b00d1ded263faf44d7c509c1d1b0e9d85998f) Enable cors for numaflow api (#1631) + * [e9c3731b](https://github.com/numaproj/numaflow/commit/e9c3731bcb53f5b5c18695efee2d2a4c5b616adc) feat: expose controller leader election duration and renew opts (#1657) + * [e7cf8c77](https://github.com/numaproj/numaflow/commit/e7cf8c773372d3609c358c8aac0fa65eca4cbedf) fix: add headers to custom sinkrequest (#1653) + * [2ef4286c](https://github.com/numaproj/numaflow/commit/2ef4286c90a4e02631480d70d04ba250a4f426ee) fix: pass headers to transfomer (#1651) + * [75195d56](https://github.com/numaproj/numaflow/commit/75195d56dcc25bcbe277abe257c3d7eb7378ca86) fix: avoid publishing watermarks for duplicate messages. 
(#1649) + * [872d8a83](https://github.com/numaproj/numaflow/commit/872d8a8399d9d782b9fe95929e029d158da999c1) fix: flaky TestDropOnFull (#1647) + * [645a6941](https://github.com/numaproj/numaflow/commit/645a694173a7d7a34cb7ad4e26d445e6de053886) fix: Dedup not working for multi-partitioned edge (#1639) + * [a6297030](https://github.com/numaproj/numaflow/commit/a6297030792ca25435fb1c35f14c71e2b8daaf8b) fix: readonly view (#1640) + * [0c68cd40](https://github.com/numaproj/numaflow/commit/0c68cd40a3d4065b49e335a0a53d3c5f85925811) feat: read only view for UI (#1628) + * [3dbba4f6](https://github.com/numaproj/numaflow/commit/3dbba4f64931313267cb9023c2e787bd86a111f6) fix: race condition while publishing wm inside reduce (#1599) + * [74ab70aa](https://github.com/numaproj/numaflow/commit/74ab70aaf992af370a5ffe779e1b3147263d84ad) fix: bug in late message handling for sliding window (#1471) + * [35c2fe00](https://github.com/numaproj/numaflow/commit/35c2fe00a0bef24a658ebecfc3a39de50c195ed0) fix: numaflow package style fixes (#1622) + * [f1e5ba0e](https://github.com/numaproj/numaflow/commit/f1e5ba0eb222edf3e5a5593769efc3626b092c1b) doc: add new user to the list (#1623) + * [caf49c91](https://github.com/numaproj/numaflow/commit/caf49c9197398cdaac9c97c7d4e126b9eeeaeb19) fix: watermark progression during pods creation/deletion (#1619) + * [756e66e6](https://github.com/numaproj/numaflow/commit/756e66e6be14e2bd0d56d7a9a3fe48bb2aa1c385) fix: allow pipeline to start with redis isbsvc (Fixes: #1513) (#1567) + * [ef94def9](https://github.com/numaproj/numaflow/commit/ef94def97157d1f532588f33504a17dd8e266623) fix: dedup in user defined source (#1613) + * [c0b9fad2](https://github.com/numaproj/numaflow/commit/c0b9fad219de930de61a3bcb04cfff2bd60320c9) chore(deps): bump express from 4.18.2 to 4.19.2 in /ui (#1609) + +### Contributors + + * Ali Ibrahim + * Chandan Kumar + * Charan + * Derek Wang + * Keran Yang + * Matt Warner + * Naga + * Quentin FAIDIDE + * Sidhant Kohli + * Sreekanth + * 
Vedant Gupta + * Vigith Maurice + * Yashash H L + * dependabot[bot] + * samhith-kakarla + * xdevxy + ## v1.2.1 (2024-05-07) * [89ea33f1](https://github.com/numaproj/numaflow/commit/89ea33f1d69785f6f5f17f1d5854ac189003918a) Update manifests to v1.2.1 From 7971f221173f8e107628ea9d023329873518cd03 Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Fri, 9 Aug 2024 13:01:00 -0700 Subject: [PATCH 11/23] feat: source to sink with an optional transformer without ISB (#1904) Signed-off-by: Vigith Maurice Signed-off-by: Yashash H L Signed-off-by: Sidhant Kohli Co-authored-by: Yashash H L Co-authored-by: Sidhant Kohli --- Dockerfile | 20 +- Makefile | 2 +- examples/21-simple-mono-vertex.yaml | 2 +- .../numaflow/v1alpha1/mono_vertex_types.go | 3 +- pkg/apis/numaflow/v1alpha1/vertex_types.go | 1 + pkg/metrics/metrics_server.go | 1 - pkg/udf/map_udf.go | 5 - serving/Cargo.lock | 271 +++++++++++- serving/Cargo.toml | 2 +- serving/Dockerfile | 20 +- serving/source-sink/Cargo.toml | 39 ++ serving/source-sink/Dockerfile | 19 + serving/source-sink/build.rs | 13 + serving/source-sink/proto/sink.proto | 57 +++ serving/source-sink/proto/source.proto | 153 +++++++ .../source-sink/proto/sourcetransform.proto | 47 ++ serving/source-sink/src/config.rs | 153 +++++++ serving/source-sink/src/error.rs | 36 ++ serving/source-sink/src/forwarder.rs | 415 ++++++++++++++++++ serving/source-sink/src/lib.rs | 306 +++++++++++++ serving/source-sink/src/main.rs | 64 +++ serving/source-sink/src/message.rs | 85 ++++ serving/source-sink/src/metrics.rs | 332 ++++++++++++++ serving/source-sink/src/shared.rs | 38 ++ serving/source-sink/src/sink.rs | 174 ++++++++ serving/source-sink/src/source.rs | 249 +++++++++++ serving/source-sink/src/transformer.rs | 159 +++++++ 27 files changed, 2646 insertions(+), 20 deletions(-) create mode 100644 serving/source-sink/Cargo.toml create mode 100644 serving/source-sink/Dockerfile create mode 100644 serving/source-sink/build.rs create mode 100644 
serving/source-sink/proto/sink.proto create mode 100644 serving/source-sink/proto/source.proto create mode 100644 serving/source-sink/proto/sourcetransform.proto create mode 100644 serving/source-sink/src/config.rs create mode 100644 serving/source-sink/src/error.rs create mode 100644 serving/source-sink/src/forwarder.rs create mode 100644 serving/source-sink/src/lib.rs create mode 100644 serving/source-sink/src/main.rs create mode 100644 serving/source-sink/src/message.rs create mode 100644 serving/source-sink/src/metrics.rs create mode 100644 serving/source-sink/src/shared.rs create mode 100644 serving/source-sink/src/sink.rs create mode 100644 serving/source-sink/src/source.rs create mode 100644 serving/source-sink/src/transformer.rs diff --git a/Dockerfile b/Dockerfile index 4ea993836f..350a9fe4e0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -17,6 +17,9 @@ FROM rust:1.79-bookworm as extension-base RUN curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash +RUN apt-get update +RUN apt-get install protobuf-compiler -y + RUN cargo new serve # Create a new empty shell project WORKDIR /serve @@ -32,6 +35,9 @@ COPY ./serving/backoff/Cargo.toml ./backoff/ RUN cargo new numaflow-models COPY ./serving/numaflow-models/Cargo.toml ./numaflow-models/ +RUN cargo new source-sink +COPY ./serving/source-sink/Cargo.toml ./source-sink/ + # Copy all Cargo.toml and Cargo.lock files for caching dependencies COPY ./serving/Cargo.toml ./serving/Cargo.lock ./ @@ -44,21 +50,29 @@ COPY ./serving/servesink/src ./servesink/src COPY ./serving/extras/upstreams/src ./extras/upstreams/src COPY ./serving/backoff/src ./backoff/src COPY ./serving/numaflow-models/src ./numaflow-models/src +COPY ./serving/source-sink/src ./source-sink/src +COPY ./serving/source-sink/build.rs ./source-sink/build.rs +COPY ./serving/source-sink/proto ./source-sink/proto # Build the real binaries -RUN touch src/main.rs 
servesink/main.rs extras/upstreams/main.rs numaflow-models/main.rs && \ - cargo build --release +RUN touch src/main.rs servesink/src/main.rs numaflow-models/src/main.rs source-sink/src/main.rs && \ + cargo build --workspace --all --release #################################################################################################### # numaflow #################################################################################################### ARG BASE_IMAGE -FROM ${BASE_IMAGE} as numaflow +FROM debian:bookworm as numaflow + +# Install necessary libraries +RUN apt-get update && apt-get install -y libssl3 COPY --from=base /bin/numaflow /bin/numaflow COPY ui/build /ui/build COPY --from=extension-base /serve/target/release/serve /bin/serve +COPY --from=extension-base /serve/target/release/sourcer-sinker /bin/sourcer-sinker + COPY ./serving/config config ENTRYPOINT [ "/bin/numaflow" ] diff --git a/Makefile b/Makefile index 4e1ee98fa4..1c11b01583 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ DIST_DIR=${CURRENT_DIR}/dist BINARY_NAME:=numaflow DOCKERFILE:=Dockerfile DEV_BASE_IMAGE:=debian:bookworm -RELEASE_BASE_IMAGE:=gcr.io/distroless/cc-debian12 +RELEASE_BASE_IMAGE:=debian:bookworm BUILD_DATE=$(shell date -u +'%Y-%m-%dT%H:%M:%SZ') GIT_COMMIT=$(shell git rev-parse HEAD) diff --git a/examples/21-simple-mono-vertex.yaml b/examples/21-simple-mono-vertex.yaml index b1f7dbd1fd..be625c41d2 100644 --- a/examples/21-simple-mono-vertex.yaml +++ b/examples/21-simple-mono-vertex.yaml @@ -10,4 +10,4 @@ spec: sink: udsink: container: - image: quay.io/numaio/numaflow-java/simple-sink:stable + image: quay.io/numaio/numaflow-java/simple-sink:stable \ No newline at end of file diff --git a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go index 359056c14c..7dd39fd2e8 100644 --- a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go @@ -403,7 +403,8 @@ func (mvspec MonoVertexSpec) 
DeepCopyWithoutReplicas() MonoVertexSpec { func (mvspec MonoVertexSpec) buildContainers(req getContainerReq) []corev1.Container { mainContainer := containerBuilder{}. - init(req).command("/bin/serve").build() // TODO: command + init(req).command(MonoVertexBinary).build() + containers := []corev1.Container{mainContainer} if mvspec.Source.UDSource != nil { // Only support UDSource for now. containers = append(containers, mvspec.Source.getUDSourceContainer(req)) diff --git a/pkg/apis/numaflow/v1alpha1/vertex_types.go b/pkg/apis/numaflow/v1alpha1/vertex_types.go index c93af36a98..71cd9c7171 100644 --- a/pkg/apis/numaflow/v1alpha1/vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/vertex_types.go @@ -51,6 +51,7 @@ const ( ) const ServingBinary = "/bin/serve" +const MonoVertexBinary = "/bin/sourcer-sinker" // +genclient // +kubebuilder:object:root=true diff --git a/pkg/metrics/metrics_server.go b/pkg/metrics/metrics_server.go index bb7626aa78..7cafaa9070 100644 --- a/pkg/metrics/metrics_server.go +++ b/pkg/metrics/metrics_server.go @@ -148,7 +148,6 @@ func NewMetricsServer(vertex *dfv1.Vertex, opts ...Option) *metricsServer { if m.lagReaders != nil { for partitionName := range m.lagReaders { m.partitionPendingInfo[partitionName] = sharedqueue.New[timestampedPending](1800) - } } return m diff --git a/pkg/udf/map_udf.go b/pkg/udf/map_udf.go index 3e33d5f8b9..44cb1b5aa7 100644 --- a/pkg/udf/map_udf.go +++ b/pkg/udf/map_udf.go @@ -219,11 +219,6 @@ func (u *MapUDFProcessor) Start(ctx context.Context) error { opts = append(opts, forward.WithUDFUnaryMap(mapHandler)) } - // We can have the vertex running only of the map modes - if enableMapUdfStream && enableBatchMapUdf { - return fmt.Errorf("vertex cannot have both map stream and batch map modes enabled") - } - for index, bufferPartition := range fromBuffer { // Populate shuffle function map shuffleFuncMap := make(map[string]*shuffle.Shuffle) diff --git a/serving/Cargo.lock b/serving/Cargo.lock index e27de24107..0e70179df8 100644 
--- a/serving/Cargo.lock +++ b/serving/Cargo.lock @@ -143,6 +143,33 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +[[package]] +name = "aws-lc-rs" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ae74d9bd0a7530e8afd1770739ad34b36838829d6ad61818f9230f683f5ad77" +dependencies = [ + "aws-lc-sys", + "mirai-annotations", + "paste", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f0e249228c6ad2d240c2dc94b714d711629d52bad946075d8e9b2f5391f0703" +dependencies = [ + "bindgen", + "cc", + "cmake", + "dunce", + "fs_extra", + "libc", + "paste", +] + [[package]] name = "axum" version = "0.7.5" @@ -210,6 +237,30 @@ dependencies = [ "syn", ] +[[package]] +name = "axum-server" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56bac90848f6a9393ac03c63c640925c4b7c8ca21654de40d53f55964667c7d8" +dependencies = [ + "arc-swap", + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.4.1", + "hyper-util", + "pin-project-lite", + "rustls", + "rustls-pemfile 2.1.3", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower", + "tower-service", +] + [[package]] name = "backoff" version = "0.1.0" @@ -251,6 +302,29 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +[[package]] +name = "bindgen" +version = "0.69.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" +dependencies = [ + "bitflags 2.6.0", + "cexpr", + "clang-sys", + "itertools 0.12.1", + "lazy_static", + "lazycell", + "log", + "prettyplease", + "proc-macro2", + "quote", 
+ "regex", + "rustc-hash", + "shlex", + "syn", + "which", +] + [[package]] name = "bitflags" version = "1.3.2" @@ -301,6 +375,19 @@ name = "cc" version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26a5c3fd7bfa1ce3897a3a3501d362b2d87b7f2583ebcb4a949ec25911025cbc" +dependencies = [ + "jobserver", + "libc", +] + +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] [[package]] name = "cfg-if" @@ -323,6 +410,26 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] + +[[package]] +name = "cmake" +version = "0.1.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" +dependencies = [ + "cc", +] + [[package]] name = "combine" version = "4.6.7" @@ -520,6 +627,12 @@ dependencies = [ "const-random", ] +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + [[package]] name = "ed25519" version = "2.2.3" @@ -621,6 +734,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + [[package]] name = "futures" version = "0.3.30" @@ -737,6 +856,12 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" +[[package]] 
+name = "glob" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" + [[package]] name = "h2" version = "0.3.26" @@ -1120,6 +1245,15 @@ version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + [[package]] name = "itertools" version = "0.13.0" @@ -1135,6 +1269,15 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +[[package]] +name = "jobserver" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +dependencies = [ + "libc", +] + [[package]] name = "js-sys" version = "0.3.69" @@ -1253,12 +1396,28 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + [[package]] name = "libc" version = "0.2.155" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" +[[package]] +name = "libloading" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" +dependencies = [ + "cfg-if", + "windows-targets 0.52.6", +] + [[package]] 
name = "linked-hash-map" version = "0.5.6" @@ -1390,6 +1549,12 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "mirai-annotations" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9be0862c1b3f26a88803c4a49de6889c10e608b3ee9344e6ef5b45fb37ad3d1" + [[package]] name = "multimap" version = "0.10.0" @@ -1644,6 +1809,12 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + [[package]] name = "pathdiff" version = "0.2.1" @@ -1836,7 +2007,7 @@ checksum = "5bb182580f71dd070f88d01ce3de9f4da5021db7115d2e1c3605a754153b77c1" dependencies = [ "bytes", "heck 0.5.0", - "itertools", + "itertools 0.13.0", "log", "multimap", "once_cell", @@ -1856,7 +2027,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "18bec9b0adc4eba778b33684b7ba3e7137789434769ee3ce3930463ef904cfca" dependencies = [ "anyhow", - "itertools", + "itertools 0.13.0", "proc-macro2", "quote", "syn", @@ -1934,6 +2105,19 @@ dependencies = [ "bitflags 2.6.0", ] +[[package]] +name = "rcgen" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54077e1872c46788540de1ea3d7f4ccb1983d12f9aa909b234468676c1a36779" +dependencies = [ + "pem", + "ring", + "rustls-pki-types", + "time", + "yasna", +] + [[package]] name = "redis" version = "0.26.1" @@ -2139,6 +2323,12 @@ version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + [[package]] name = "rustc_version" version = "0.4.0" @@ -2167,6 +2357,7 @@ version = 
"0.23.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" dependencies = [ + "aws-lc-rs", "log", "once_cell", "ring", @@ -2220,6 +2411,7 @@ version = "0.102.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e6b52d4fda176fd835fdc55a835d4a89b8499cad995885a21149d5ad62f852e" dependencies = [ + "aws-lc-rs", "ring", "rustls-pki-types", "untrusted", @@ -2473,6 +2665,12 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + [[package]] name = "signal-hook-registry" version = "1.4.2" @@ -2535,6 +2733,40 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "sourcer-sinker" +version = "0.1.0" +dependencies = [ + "axum", + "axum-server", + "base64 0.22.1", + "bytes", + "chrono", + "hyper-util", + "log", + "metrics", + "metrics-exporter-prometheus", + "numaflow", + "numaflow-models", + "once_cell", + "prost", + "prost-types", + "rcgen", + "rustls", + "serde_json", + "tempfile", + "thiserror", + "tokio", + "tokio-stream", + "tokio-util", + "tonic", + "tonic-build", + "tower", + "tracing", + "tracing-subscriber", + "uuid", +] + [[package]] name = "spin" version = "0.9.8" @@ -3209,6 +3441,18 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "which" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" +dependencies = [ + "either", + "home", + "once_cell", + "rustix", +] + [[package]] name = "winapi" version = "0.3.9" @@ -3417,6 +3661,15 @@ dependencies = [ "linked-hash-map", ] +[[package]] +name = "yasna" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" +dependencies = [ + "time", +] + [[package]] name = "zerocopy" version = "0.7.35" @@ -3443,3 +3696,17 @@ name = "zeroize" version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] diff --git a/serving/Cargo.toml b/serving/Cargo.toml index 10138b1386..58525ad62b 100644 --- a/serving/Cargo.toml +++ b/serving/Cargo.toml @@ -1,4 +1,4 @@ -workspace = { members = ["backoff", "extras/upstreams", "numaflow-models", "servesink"] } +workspace = { members = ["backoff", "extras/upstreams", "numaflow-models", "servesink", "source-sink"] } [package] name = "serve" version = "0.1.0" diff --git a/serving/Dockerfile b/serving/Dockerfile index 863b999a81..697cfd6f27 100644 --- a/serving/Dockerfile +++ b/serving/Dockerfile @@ -4,6 +4,9 @@ FROM rust:1.79-bookworm as builder RUN curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash +RUN apt-get update +RUN apt-get install protobuf-compiler -y + RUN cargo new serve # Create a new empty shell project WORKDIR /serve @@ -19,6 +22,9 @@ COPY ./backoff/Cargo.toml ./backoff/Cargo.toml RUN cargo new numaflow-models COPY ./numaflow-models/Cargo.toml ./numaflow-models/ +RUN cargo new source-sink +COPY ./source-sink/Cargo.toml ./source-sink/Cargo.toml + # Copy all Cargo.toml and Cargo.lock files for caching dependencies COPY ./Cargo.toml ./Cargo.lock ./ @@ -31,16 +37,20 @@ COPY ./servesink/src ./servesink/src COPY ./extras/upstreams/src ./extras/upstreams/src COPY ./backoff/src ./backoff/src 
COPY ./numaflow-models/src ./numaflow-models/src +COPY ./source-sink/src ./source-sink/src +COPY ./source-sink/build.rs ./source-sink/build.rs +COPY ./source-sink/proto ./source-sink/proto # Build the real binaries -RUN touch src/main.rs servesink/main.rs extras/upstreams/main.rs numaflow-models/main.rs && \ - cargo build --release +RUN touch src/main.rs servesink/src/main.rs numaflow-models/src/main.rs source-sink/src/main.rs && \ + cargo build --workspace --all --release # Use a lightweight image for the runtime -FROM gcr.io/distroless/cc-debian12 as numaflow-ext +FROM debian:bookworm as numaflow-ext -COPY --from=builder /serve/target/release/serve . -COPY ./config config +RUN apt-get update && apt-get install -y libssl3 +COPY --from=builder /serve/target/release/ . +COPY ./config config ENTRYPOINT ["./serve"] \ No newline at end of file diff --git a/serving/source-sink/Cargo.toml b/serving/source-sink/Cargo.toml new file mode 100644 index 0000000000..9db2bbff35 --- /dev/null +++ b/serving/source-sink/Cargo.toml @@ -0,0 +1,39 @@ +[package] +name = "sourcer-sinker" +version = "0.1.0" +edition = "2021" + +[dependencies] +axum = "0.7.5" +axum-server = { version = "0.7.1", features = ["tls-rustls"] } +tonic = "0.12.0" +bytes = "1.7.1" +thiserror = "1.0.63" +tokio = { version = "1.39.2", features = ["full"] } +tracing = "0.1.40" +tokio-util = "0.7.11" +tokio-stream = "0.1.15" +prost = "0.13.1" +prost-types = "0.13.1" +chrono = "0.4.31" +base64 = "0.22.1" +metrics = { version = "0.23.0", default-features = false } +metrics-exporter-prometheus = { version = "0.15.3", default-features = false } +log = "0.4.22" +tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } +hyper-util = "0.1.6" +tower = "0.4.13" +uuid = { version = "1.10.0", features = ["v4"] } +once_cell = "1.19.0" +serde_json = "1.0.122" +numaflow-models = { path = "../numaflow-models"} +rcgen = "0.13.1" +rustls = { version = "0.23.12", features = ["aws_lc_rs"] } + +[dev-dependencies] +tower = 
"0.4.13" +tempfile = "3.11.0" +numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", branch="main" } + +[build-dependencies] +tonic-build = "0.12.1" diff --git a/serving/source-sink/Dockerfile b/serving/source-sink/Dockerfile new file mode 100644 index 0000000000..4ed8bb62f7 --- /dev/null +++ b/serving/source-sink/Dockerfile @@ -0,0 +1,19 @@ +FROM rust:1.76-bookworm AS build + +RUN apt-get update +RUN apt-get install protobuf-compiler -y + +WORKDIR /source-sink +COPY ./ ./ + +# build for release +RUN cargo build --release + +# our final base +FROM debian:bookworm AS simple-source + +# copy the build artifact from the build stage +COPY --from=build /source-sink/target/release/source-sink /bin/serve + +# set the startup command to run your binary +CMD ["/bin/serve"] diff --git a/serving/source-sink/build.rs b/serving/source-sink/build.rs new file mode 100644 index 0000000000..fc30e6b678 --- /dev/null +++ b/serving/source-sink/build.rs @@ -0,0 +1,13 @@ +fn main() { + tonic_build::configure() + .build_server(true) + .compile( + &[ + "proto/source.proto", + "proto/sourcetransform.proto", + "proto/sink.proto", + ], + &["proto"], + ) + .unwrap_or_else(|e| panic!("failed to compile the proto, {:?}", e)) +} diff --git a/serving/source-sink/proto/sink.proto b/serving/source-sink/proto/sink.proto new file mode 100644 index 0000000000..c413ea863b --- /dev/null +++ b/serving/source-sink/proto/sink.proto @@ -0,0 +1,57 @@ +syntax = "proto3"; + +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; + +package sink.v1; + +service Sink { + // SinkFn writes the request to a user defined sink. + rpc SinkFn(stream SinkRequest) returns (SinkResponse); + + // IsReady is the heartbeat endpoint for gRPC. + rpc IsReady(google.protobuf.Empty) returns (ReadyResponse); +} + +/** + * SinkRequest represents a request element. 
+ */ +message SinkRequest { + repeated string keys = 1; + bytes value = 2; + google.protobuf.Timestamp event_time = 3; + google.protobuf.Timestamp watermark = 4; + string id = 5; + map headers = 6; +} + +/** + * ReadyResponse is the health check result. + */ +message ReadyResponse { + bool ready = 1; +} + +/** + * SinkResponse is the individual response of each message written to the sink. + */ +message SinkResponse { + message Result { + // id is the ID of the message, can be used to uniquely identify the message. + string id = 1; + // status denotes the status of persisting to sink. It can be SUCCESS, FAILURE, or FALLBACK. + Status status = 2; + // err_msg is the error message, set it if success is set to false. + string err_msg = 3; + } + repeated Result results = 1; +} + +/* + * Status is the status of the response. + */ +enum Status { + SUCCESS = 0; + FAILURE = 1; + FALLBACK = 2; +} \ No newline at end of file diff --git a/serving/source-sink/proto/source.proto b/serving/source-sink/proto/source.proto new file mode 100644 index 0000000000..131cc36d30 --- /dev/null +++ b/serving/source-sink/proto/source.proto @@ -0,0 +1,153 @@ +syntax = "proto3"; + +import "google/protobuf/timestamp.proto"; +import "google/protobuf/empty.proto"; + +package source.v1; + +service Source { + // Read returns a stream of datum responses. + // The size of the returned ReadResponse is less than or equal to the num_records specified in ReadRequest. + // If the request timeout is reached on server side, the returned ReadResponse will contain all the datum that have been read (which could be an empty list). + rpc ReadFn(ReadRequest) returns (stream ReadResponse); + + // AckFn acknowledges a list of datum offsets. + // When AckFn is called, it implicitly indicates that the datum stream has been processed by the source vertex. + // The caller (numa) expects the AckFn to be successful, and it does not expect any errors. 
+ // If there are some irrecoverable errors when the callee (UDSource) is processing the AckFn request, + // then it is best to crash because there are no other retry mechanisms possible. + rpc AckFn(AckRequest) returns (AckResponse); + + // PendingFn returns the number of pending records at the user defined source. + rpc PendingFn(google.protobuf.Empty) returns (PendingResponse); + + // PartitionsFn returns the list of partitions for the user defined source. + rpc PartitionsFn(google.protobuf.Empty) returns (PartitionsResponse); + + // IsReady is the heartbeat endpoint for user defined source gRPC. + rpc IsReady(google.protobuf.Empty) returns (ReadyResponse); +} + +/* + * ReadRequest is the request for reading datum stream from user defined source. + */ +message ReadRequest { + message Request { + // Required field indicating the number of records to read. + uint64 num_records = 1; + // Required field indicating the request timeout in milliseconds. + // uint32 can represent 2^32 milliseconds, which is about 49 days. + // We don't use uint64 because time.Duration takes int64 as nano seconds. Using uint64 for milli will cause overflow. + uint32 timeout_in_ms = 2; + } + // Required field indicating the request. + Request request = 1; +} + +/* + * ReadResponse is the response for reading datum stream from user defined source. + */ +message ReadResponse { + message Result { + // Required field holding the payload of the datum. + bytes payload = 1; + // Required field indicating the offset information of the datum. + Offset offset = 2; + // Required field representing the time associated with each datum. It is used for watermarking. + google.protobuf.Timestamp event_time = 3; + // Optional list of keys associated with the datum. + // Key is the "key" attribute in (key,value) as in the map-reduce paradigm. + // We add this optional field to support the use case where the user defined source can provide keys for the datum. + // e.g. 
Kafka and Redis Stream message usually include information about the keys. + repeated string keys = 4; + // Optional list of headers associated with the datum. + // Headers are the metadata associated with the datum. + // e.g. Kafka and Redis Stream message usually include information about the headers. + map headers = 5; + } + // Required field holding the result. + Result result = 1; +} + +/* + * AckRequest is the request for acknowledging datum. + * It takes a list of offsets to be acknowledged. + */ +message AckRequest { + message Request { + // Required field holding a list of offsets to be acknowledged. + // The offsets must be strictly corresponding to the previously read batch, + // meaning the offsets must be in the same order as the datum responses in the ReadResponse. + // By enforcing ordering, we can save deserialization effort on the server side, assuming the server keeps a local copy of the raw/un-serialized offsets. + repeated Offset offsets = 1; + } + // Required field holding the request. The list will be ordered and will have the same order as the original Read response. + Request request = 1; +} + +/* + * AckResponse is the response for acknowledging datum. It contains one empty field confirming + * the batch of offsets that have been successfully acknowledged. The contract between client and server + * is that the server will only return the AckResponse if the ack request is successful. + * If the server hangs during the ack request, the client can decide to timeout and error out the data forwarder. + * The reason why we define such contract is that we always expect the server to be able to process the ack request. + * Client is expected to send the AckRequest to the server with offsets that are strictly + * corresponding to the previously read batch. If the client sends the AckRequest with offsets that are not, + * it is considered as a client error and the server will not return the AckResponse. 
+ */ +message AckResponse { + message Result { + // Required field indicating the ack request is successful. + google.protobuf.Empty success = 1; + } + // Required field holding the result. + Result result = 1; +} + +/* + * ReadyResponse is the health check result for user defined source. + */ +message ReadyResponse { + // Required field holding the health check result. + bool ready = 1; +} + +/* + * PendingResponse is the response for the pending request. + */ +message PendingResponse { + message Result { + // Required field holding the number of pending records at the user defined source. + // A negative count indicates that the pending information is not available. + int64 count = 1; + } + // Required field holding the result. + Result result = 1; +} + +/* + * PartitionsResponse is the response for the partitions request. + */ +message PartitionsResponse { + message Result { + // Required field holding the list of partitions. + repeated int32 partitions = 1; + } + // Required field holding the result. + Result result = 1; +} + +/* + * Offset is the offset of the datum. + */ +message Offset { + // offset is the offset of the datum. This field is required. + // We define Offset as a byte array because different input data sources can have different representations for Offset. + // The only way to generalize it is to define it as a byte array, + // Such that we can let the UDSource to de-serialize the offset using its own interpretation logics. + bytes offset = 1; + // Optional partition_id indicates which partition of the source the datum belongs to. + // It is useful for sources that have multiple partitions. e.g. Kafka. + // If the partition_id is not specified, it is assumed that the source has a single partition. 
+ int32 partition_id = 2; +} \ No newline at end of file diff --git a/serving/source-sink/proto/sourcetransform.proto b/serving/source-sink/proto/sourcetransform.proto new file mode 100644 index 0000000000..18e045c323 --- /dev/null +++ b/serving/source-sink/proto/sourcetransform.proto @@ -0,0 +1,47 @@ +syntax = "proto3"; + +import "google/protobuf/timestamp.proto"; +import "google/protobuf/empty.proto"; + +package sourcetransformer.v1; + +service SourceTransform { + // SourceTransformFn applies a function to each request element. + // In addition to map function, SourceTransformFn also supports assigning a new event time to response. + // SourceTransformFn can be used only at source vertex by source data transformer. + rpc SourceTransformFn(SourceTransformRequest) returns (SourceTransformResponse); + + // IsReady is the heartbeat endpoint for gRPC. + rpc IsReady(google.protobuf.Empty) returns (ReadyResponse); +} + +/** + * SourceTransformerRequest represents a request element. + */ +message SourceTransformRequest { + repeated string keys = 1; + bytes value = 2; + google.protobuf.Timestamp event_time = 3; + google.protobuf.Timestamp watermark = 4; + map headers = 5; +} + +/** + * SourceTransformerResponse represents a response element. + */ +message SourceTransformResponse { + message Result { + repeated string keys = 1; + bytes value = 2; + google.protobuf.Timestamp event_time = 3; + repeated string tags = 4; + } + repeated Result results = 1; +} + +/** + * ReadyResponse is the health check result. 
+ */ +message ReadyResponse { + bool ready = 1; +} \ No newline at end of file diff --git a/serving/source-sink/src/config.rs b/serving/source-sink/src/config.rs new file mode 100644 index 0000000000..9ac27a3413 --- /dev/null +++ b/serving/source-sink/src/config.rs @@ -0,0 +1,153 @@ +use crate::error::Error; +use base64::prelude::BASE64_STANDARD; +use base64::Engine; +use log::LevelFilter; +use numaflow_models::models::MonoVertex; +use std::env; +use std::sync::OnceLock; + +const ENV_MONO_VERTEX_OBJ: &str = "NUMAFLOW_MONO_VERTEX_OBJECT"; +const ENV_GRPC_MAX_MESSAGE_SIZE: &str = "NUMAFLOW_GRPC_MAX_MESSAGE_SIZE"; +const ENV_POD_REPLICA: &str = "NUMAFLOW_REPLICA"; +const DEFAULT_GRPC_MAX_MESSAGE_SIZE: usize = 64 * 1024 * 1024; // 64 MB +const DEFAULT_METRICS_PORT: u16 = 2469; +const ENV_LOG_LEVEL: &str = "NUMAFLOW_DEBUG"; +const DEFAULT_LAG_CHECK_INTERVAL_IN_SECS: u16 = 5; +const DEFAULT_LAG_REFRESH_INTERVAL_IN_SECS: u16 = 3; +const DEFAULT_BATCH_SIZE: u64 = 500; +const DEFAULT_TIMEOUT_IN_MS: u32 = 1000; + +pub fn config() -> &'static Settings { + static CONF: OnceLock = OnceLock::new(); + CONF.get_or_init(|| match Settings::load() { + Ok(v) => v, + Err(e) => { + panic!("Failed to load configuration: {:?}", e); + } + }) +} + +pub struct Settings { + pub mono_vertex_name: String, + pub replica: u32, + pub batch_size: u64, + pub timeout_in_ms: u32, + pub metrics_server_listen_port: u16, + pub log_level: String, + pub grpc_max_message_size: usize, + pub is_transformer_enabled: bool, + pub lag_check_interval_in_secs: u16, + pub lag_refresh_interval_in_secs: u16, +} + +impl Default for Settings { + fn default() -> Self { + Self { + mono_vertex_name: "default".to_string(), + replica: 0, + batch_size: DEFAULT_BATCH_SIZE, + timeout_in_ms: DEFAULT_TIMEOUT_IN_MS, + metrics_server_listen_port: DEFAULT_METRICS_PORT, + log_level: "info".to_string(), + grpc_max_message_size: DEFAULT_GRPC_MAX_MESSAGE_SIZE, + is_transformer_enabled: false, + lag_check_interval_in_secs: 
DEFAULT_LAG_CHECK_INTERVAL_IN_SECS, + lag_refresh_interval_in_secs: DEFAULT_LAG_REFRESH_INTERVAL_IN_SECS, + } + } +} + +impl Settings { + fn load() -> Result { + let mut settings = Settings::default(); + if let Ok(mono_vertex_spec) = env::var(ENV_MONO_VERTEX_OBJ) { + // decode the spec it will be base64 encoded + let mono_vertex_spec = BASE64_STANDARD + .decode(mono_vertex_spec.as_bytes()) + .map_err(|e| { + Error::ConfigError(format!("Failed to decode mono vertex spec: {:?}", e)) + })?; + + let mono_vertex_obj: MonoVertex = + serde_json::from_slice(&mono_vertex_spec).map_err(|e| { + Error::ConfigError(format!("Failed to parse mono vertex spec: {:?}", e)) + })?; + + settings.batch_size = mono_vertex_obj + .spec + .limits + .clone() + .unwrap() + .read_batch_size + .map(|x| x as u64) + .unwrap_or(DEFAULT_BATCH_SIZE); + + settings.timeout_in_ms = mono_vertex_obj + .spec + .limits + .clone() + .unwrap() + .read_timeout + .map(|x| std::time::Duration::from(x).as_millis() as u32) + .unwrap_or(DEFAULT_TIMEOUT_IN_MS); + + settings.mono_vertex_name = mono_vertex_obj + .metadata + .and_then(|metadata| metadata.name) + .ok_or_else(|| Error::ConfigError("Mono vertex name not found".to_string()))?; + + settings.is_transformer_enabled = mono_vertex_obj + .spec + .source + .ok_or(Error::ConfigError("Source not found".to_string()))? 
+ .transformer + .is_some(); + } + + settings.log_level = + env::var(ENV_LOG_LEVEL).unwrap_or_else(|_| LevelFilter::Info.to_string()); + + settings.grpc_max_message_size = env::var(ENV_GRPC_MAX_MESSAGE_SIZE) + .unwrap_or_else(|_| DEFAULT_GRPC_MAX_MESSAGE_SIZE.to_string()) + .parse() + .map_err(|e| { + Error::ConfigError(format!("Failed to parse grpc max message size: {:?}", e)) + })?; + + settings.replica = env::var(ENV_POD_REPLICA) + .unwrap_or_else(|_| "0".to_string()) + .parse() + .map_err(|e| Error::ConfigError(format!("Failed to parse pod replica: {:?}", e)))?; + + Ok(settings) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::env; + + #[test] + fn test_settings_load() { + // Set up environment variables + env::set_var(ENV_MONO_VERTEX_OBJ, "eyJtZXRhZGF0YSI6eyJuYW1lIjoic2ltcGxlLW1vbm8tdmVydGV4IiwibmFtZXNwYWNlIjoiZGVmYXVsdCIsImNyZWF0aW9uVGltZXN0YW1wIjpudWxsfSwic3BlYyI6eyJyZXBsaWNhcyI6MCwic291cmNlIjp7InRyYW5zZm9ybWVyIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6InF1YXkuaW8vbnVtYWlvL251bWFmbG93LXJzL21hcHQtZXZlbnQtdGltZS1maWx0ZXI6c3RhYmxlIiwicmVzb3VyY2VzIjp7fX0sImJ1aWx0aW4iOm51bGx9LCJ1ZHNvdXJjZSI6eyJjb250YWluZXIiOnsiaW1hZ2UiOiJkb2NrZXIuaW50dWl0LmNvbS9wZXJzb25hbC95aGwwMS9zaW1wbGUtc291cmNlOnN0YWJsZSIsInJlc291cmNlcyI6e319fX0sInNpbmsiOnsidWRzaW5rIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6ImRvY2tlci5pbnR1aXQuY29tL3BlcnNvbmFsL3lobDAxL2JsYWNraG9sZS1zaW5rOnN0YWJsZSIsInJlc291cmNlcyI6e319fX0sImxpbWl0cyI6eyJyZWFkQmF0Y2hTaXplIjo1MDAsInJlYWRUaW1lb3V0IjoiMXMifSwic2NhbGUiOnt9fSwic3RhdHVzIjp7InJlcGxpY2FzIjowLCJsYXN0VXBkYXRlZCI6bnVsbCwibGFzdFNjYWxlZEF0IjpudWxsfX0="); + env::set_var(ENV_LOG_LEVEL, "debug"); + env::set_var(ENV_GRPC_MAX_MESSAGE_SIZE, "128000000"); + + // Load settings + let settings = Settings::load().unwrap(); + + // Verify settings + assert_eq!(settings.mono_vertex_name, "simple-mono-vertex"); + assert_eq!(settings.batch_size, 500); + assert_eq!(settings.timeout_in_ms, 1000); + assert_eq!(settings.log_level, "debug"); + assert_eq!(settings.grpc_max_message_size, 128000000); + + 
// Clean up environment variables + env::remove_var(ENV_MONO_VERTEX_OBJ); + env::remove_var(ENV_LOG_LEVEL); + env::remove_var(ENV_GRPC_MAX_MESSAGE_SIZE); + } +} diff --git a/serving/source-sink/src/error.rs b/serving/source-sink/src/error.rs new file mode 100644 index 0000000000..76ae1ce590 --- /dev/null +++ b/serving/source-sink/src/error.rs @@ -0,0 +1,36 @@ +use thiserror::Error; + +pub type Result = std::result::Result; + +#[derive(Error, Debug, Clone)] +pub enum Error { + #[error("Metrics Error - {0}")] + MetricsError(String), + + #[error("Source Error - {0}")] + SourceError(String), + + #[error("Sink Error - {0}")] + SinkError(String), + + #[error("Transformer Error - {0}")] + TransformerError(String), + + #[error("Forwarder Error - {0}")] + ForwarderError(String), + + #[error("Connection Error - {0}")] + ConnectionError(String), + + #[error("gRPC Error - {0}")] + GRPCError(String), + + #[error("Config Error - {0}")] + ConfigError(String), +} + +impl From for Error { + fn from(status: tonic::Status) -> Self { + Error::GRPCError(status.to_string()) + } +} diff --git a/serving/source-sink/src/forwarder.rs b/serving/source-sink/src/forwarder.rs new file mode 100644 index 0000000000..cd39038a7c --- /dev/null +++ b/serving/source-sink/src/forwarder.rs @@ -0,0 +1,415 @@ +use crate::config::config; +use crate::error::{Error, Result}; +use crate::metrics::{ + FORWARDER_ACK_TOTAL, FORWARDER_READ_BYTES_TOTAL, FORWARDER_READ_TOTAL, FORWARDER_WRITE_TOTAL, + MONO_VERTEX_NAME, PARTITION_LABEL, REPLICA_LABEL, VERTEX_TYPE_LABEL, +}; +use crate::sink::SinkClient; +use crate::source::SourceClient; +use crate::transformer::TransformerClient; +use chrono::Utc; +use metrics::counter; +use tokio::sync::oneshot; +use tokio::task::JoinSet; +use tracing::{info, trace}; + +const MONO_VERTEX_TYPE: &str = "mono_vertex"; + +/// Forwarder is responsible for reading messages from the source, applying transformation if +/// transformer is present, writing the messages to the sink, and then 
acknowledging the messages +/// back to the source. +pub(crate) struct Forwarder { + source_client: SourceClient, + sink_client: SinkClient, + transformer_client: Option, + shutdown_rx: oneshot::Receiver<()>, + common_labels: Vec<(String, String)>, +} + +impl Forwarder { + #[allow(clippy::too_many_arguments)] + pub(crate) async fn new( + source_client: SourceClient, + sink_client: SinkClient, + transformer_client: Option, + shutdown_rx: oneshot::Receiver<()>, + ) -> Result { + let common_labels = vec![ + ( + MONO_VERTEX_NAME.to_string(), + config().mono_vertex_name.clone(), + ), + (VERTEX_TYPE_LABEL.to_string(), MONO_VERTEX_TYPE.to_string()), + (REPLICA_LABEL.to_string(), config().replica.to_string()), + (PARTITION_LABEL.to_string(), "0".to_string()), + ]; + + Ok(Self { + source_client, + sink_client, + transformer_client, + shutdown_rx, + common_labels, + }) + } + + /// run starts the forward-a-chunk loop and exits only after a chunk has been forwarded and ack'ed. + /// this means that, in the happy path scenario a block is always completely processed. + /// this function will return on any error and will cause end up in a non-0 exit code. + pub(crate) async fn run(&mut self) -> Result<()> { + let mut messages_count: u64 = 0; + let mut last_forwarded_at = std::time::Instant::now(); + loop { + // TODO: emit latency metrics, metrics-rs histograms has memory leak issues. + let start_time = tokio::time::Instant::now(); + // two arms, either shutdown or forward-a-chunk + tokio::select! 
{ + _ = &mut self.shutdown_rx => { + info!("Shutdown signal received, stopping forwarder..."); + break; + } + result = self.source_client.read_fn(config().batch_size, config().timeout_in_ms) => { + // Read messages from the source + let messages = result?; + info!("Read batch size: {} and latency - {}ms", messages.len(), start_time.elapsed().as_millis()); + + messages_count += messages.len() as u64; + let bytes_count = messages.iter().map(|msg| msg.value.len() as u64).sum::(); + counter!(FORWARDER_READ_TOTAL, &self.common_labels).increment(messages_count); + counter!(FORWARDER_READ_BYTES_TOTAL, &self.common_labels).increment(bytes_count); + + // Extract offsets from the messages + let offsets = messages.iter().map(|message| message.offset.clone()).collect(); + // Apply transformation if transformer is present + let transformed_messages = if let Some(transformer_client) = &self.transformer_client { + let start_time = tokio::time::Instant::now(); + let mut jh = JoinSet::new(); + for message in messages { + let mut transformer_client = transformer_client.clone(); + jh.spawn(async move { transformer_client.transform_fn(message).await }); + } + + let mut results = Vec::new(); + while let Some(task) = jh.join_next().await { + let result = task.map_err(|e| Error::TransformerError(format!("{:?}", e)))?; + let result = result?; + results.extend(result); + } + info!("Transformer latency - {}ms", start_time.elapsed().as_millis()); + results + } else { + messages + }; + + // Write messages to the sink + // TODO: should we retry writing? what if the error is transient? + // we could rely on gRPC retries and say that any error that is bubbled up is worthy of non-0 exit. + // we need to confirm this via FMEA tests. 
+ let start_time = tokio::time::Instant::now(); + self.sink_client.sink_fn(transformed_messages).await?; + info!("Sink latency - {}ms", start_time.elapsed().as_millis()); + counter!(FORWARDER_WRITE_TOTAL, &self.common_labels).increment(messages_count); + + // Acknowledge the messages + // TODO: should we retry acking? what if the error is transient? + // we could rely on gRPC retries and say that any error that is bubbled up is worthy of non-0 exit. + // we need to confirm this via FMEA tests. + let start_time = tokio::time::Instant::now(); + self.source_client.ack_fn(offsets).await?; + info!("Ack latency - {}ms", start_time.elapsed().as_millis()); + + counter!(FORWARDER_ACK_TOTAL, &self.common_labels).increment(messages_count); + trace!("Forwarded {} messages", messages_count); + } + } + // if the last forward was more than 1 second ago, forward a chunk print the number of messages forwarded + if last_forwarded_at.elapsed().as_millis() >= 1000 { + info!( + "Forwarded {} messages at time {}", + messages_count, + Utc::now() + ); + messages_count = 0; + last_forwarded_at = std::time::Instant::now(); + } + } + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use std::collections::HashSet; + + use chrono::Utc; + use numaflow::source::{Message, Offset, SourceReadRequest}; + use numaflow::{sink, source, sourcetransform}; + use tokio::sync::mpsc::Sender; + + use crate::forwarder::Forwarder; + use crate::sink::{SinkClient, SinkConfig}; + use crate::source::{SourceClient, SourceConfig}; + use crate::transformer::{TransformerClient, TransformerConfig}; + + struct SimpleSource { + yet_to_be_acked: std::sync::RwLock>, + } + + impl SimpleSource { + fn new() -> Self { + Self { + yet_to_be_acked: std::sync::RwLock::new(HashSet::new()), + } + } + } + + #[tonic::async_trait] + impl source::Sourcer for SimpleSource { + async fn read(&self, request: SourceReadRequest, transmitter: Sender) { + let event_time = Utc::now(); + let mut message_offsets = Vec::with_capacity(request.count); + 
for i in 0..2 { + let offset = format!("{}-{}", event_time.timestamp_nanos_opt().unwrap(), i); + transmitter + .send(Message { + value: "test-message".as_bytes().to_vec(), + event_time, + offset: Offset { + offset: offset.clone().into_bytes(), + partition_id: 0, + }, + keys: vec!["test-key".to_string()], + headers: Default::default(), + }) + .await + .unwrap(); + message_offsets.push(offset) + } + self.yet_to_be_acked + .write() + .unwrap() + .extend(message_offsets) + } + + async fn ack(&self, offsets: Vec) { + for offset in offsets { + self.yet_to_be_acked + .write() + .unwrap() + .remove(&String::from_utf8(offset.offset).unwrap()); + } + } + + async fn pending(&self) -> usize { + self.yet_to_be_acked.read().unwrap().len() + } + + async fn partitions(&self) -> Option> { + Some(vec![0]) + } + } + + struct SimpleTransformer; + #[tonic::async_trait] + impl sourcetransform::SourceTransformer for SimpleTransformer { + async fn transform( + &self, + input: sourcetransform::SourceTransformRequest, + ) -> Vec { + let keys = input + .keys + .iter() + .map(|k| k.clone() + "-transformed") + .collect(); + let message = sourcetransform::Message::new(input.value, Utc::now()) + .keys(keys) + .tags(vec![]); + vec![message] + } + } + + struct InMemorySink { + sender: Sender, + } + + impl InMemorySink { + fn new(sender: Sender) -> Self { + Self { sender } + } + } + + #[tonic::async_trait] + impl sink::Sinker for InMemorySink { + async fn sink( + &self, + mut input: tokio::sync::mpsc::Receiver, + ) -> Vec { + let mut responses: Vec = Vec::new(); + while let Some(datum) = input.recv().await { + let response = match std::str::from_utf8(&datum.value) { + Ok(_) => { + self.sender + .send(Message { + value: datum.value.clone(), + event_time: datum.event_time, + offset: Offset { + offset: "test-offset".to_string().into_bytes(), + partition_id: 0, + }, + keys: datum.keys.clone(), + headers: Default::default(), + }) + .await + .unwrap(); + sink::Response::ok(datum.id) + } + Err(e) => { + 
sink::Response::failure(datum.id, format!("Invalid UTF-8 sequence: {}", e)) + } + }; + responses.push(response); + } + responses + } + } + + #[tokio::test] + async fn test_forwarder_source_sink() { + // Create channels for communication + let (sink_tx, mut sink_rx) = tokio::sync::mpsc::channel(10); + + // Start the source server + let (source_shutdown_tx, source_shutdown_rx) = tokio::sync::oneshot::channel(); + let tmp_dir = tempfile::TempDir::new().unwrap(); + let source_sock_file = tmp_dir.path().join("source.sock"); + let server_info_file = tmp_dir.path().join("source-server-info"); + + let server_info = server_info_file.clone(); + let source_socket = source_sock_file.clone(); + let source_server_handle = tokio::spawn(async move { + source::Server::new(SimpleSource::new()) + .with_socket_file(source_socket) + .with_server_info_file(server_info) + .start_with_shutdown(source_shutdown_rx) + .await + .unwrap(); + }); + let source_config = SourceConfig { + socket_path: source_sock_file.to_str().unwrap().to_string(), + server_info_file: server_info_file.to_str().unwrap().to_string(), + max_message_size: 4 * 1024 * 1024, + }; + + // Start the sink server + let (sink_shutdown_tx, sink_shutdown_rx) = tokio::sync::oneshot::channel(); + let sink_tmp_dir = tempfile::TempDir::new().unwrap(); + let sink_sock_file = sink_tmp_dir.path().join("sink.sock"); + let server_info_file = sink_tmp_dir.path().join("sink-server-info"); + + let server_info = server_info_file.clone(); + let sink_socket = sink_sock_file.clone(); + let sink_server_handle = tokio::spawn(async move { + sink::Server::new(InMemorySink::new(sink_tx)) + .with_socket_file(sink_socket) + .with_server_info_file(server_info) + .start_with_shutdown(sink_shutdown_rx) + .await + .unwrap(); + }); + let sink_config = SinkConfig { + socket_path: sink_sock_file.to_str().unwrap().to_string(), + server_info_file: server_info_file.to_str().unwrap().to_string(), + max_message_size: 4 * 1024 * 1024, + }; + + // Start the 
transformer server + let (transformer_shutdown_tx, transformer_shutdown_rx) = tokio::sync::oneshot::channel(); + let tmp_dir = tempfile::TempDir::new().unwrap(); + let transformer_sock_file = tmp_dir.path().join("transformer.sock"); + let server_info_file = tmp_dir.path().join("transformer-server-info"); + + let server_info = server_info_file.clone(); + let transformer_socket = transformer_sock_file.clone(); + let transformer_server_handle = tokio::spawn(async move { + sourcetransform::Server::new(SimpleTransformer) + .with_socket_file(transformer_socket) + .with_server_info_file(server_info) + .start_with_shutdown(transformer_shutdown_rx) + .await + .unwrap(); + }); + let transformer_config = TransformerConfig { + socket_path: transformer_sock_file.to_str().unwrap().to_string(), + server_info_file: server_info_file.to_str().unwrap().to_string(), + max_message_size: 4 * 1024 * 1024, + }; + + // Wait for the servers to start + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + + let (forwarder_shutdown_tx, forwarder_shutdown_rx) = tokio::sync::oneshot::channel(); + + let source_client = SourceClient::connect(source_config) + .await + .expect("failed to connect to source server"); + + let sink_client = SinkClient::connect(sink_config) + .await + .expect("failed to connect to sink server"); + + let transformer_client = TransformerClient::connect(transformer_config) + .await + .expect("failed to connect to transformer server"); + + let mut forwarder = Forwarder::new( + source_client, + sink_client, + Some(transformer_client), + forwarder_shutdown_rx, + ) + .await + .expect("failed to create forwarder"); + + let forwarder_handle = tokio::spawn(async move { + forwarder.run().await.unwrap(); + }); + + // Receive messages from the sink + let received_message = sink_rx.recv().await.unwrap(); + assert_eq!(received_message.value, "test-message".as_bytes()); + assert_eq!( + received_message.keys, + vec!["test-key-transformed".to_string()] + ); + + // stop the 
forwarder + forwarder_shutdown_tx + .send(()) + .expect("failed to send shutdown signal"); + forwarder_handle + .await + .expect("failed to join forwarder task"); + + // stop the servers + source_shutdown_tx + .send(()) + .expect("failed to send shutdown signal"); + source_server_handle + .await + .expect("failed to join source server task"); + + transformer_shutdown_tx + .send(()) + .expect("failed to send shutdown signal"); + transformer_server_handle + .await + .expect("failed to join transformer server task"); + + sink_shutdown_tx + .send(()) + .expect("failed to send shutdown signal"); + sink_server_handle + .await + .expect("failed to join sink server task"); + } +} diff --git a/serving/source-sink/src/lib.rs b/serving/source-sink/src/lib.rs new file mode 100644 index 0000000000..2099bc63b9 --- /dev/null +++ b/serving/source-sink/src/lib.rs @@ -0,0 +1,306 @@ +use std::fs; +use std::time::Duration; + +use tokio::signal; +use tokio::sync::oneshot; +use tokio::task::JoinHandle; +use tokio::time::sleep; +use tracing::{error, info}; + +pub(crate) use crate::error::Error; +use crate::forwarder::Forwarder; +use crate::sink::{SinkClient, SinkConfig}; +use crate::source::{SourceClient, SourceConfig}; +use crate::transformer::{TransformerClient, TransformerConfig}; + +pub(crate) use self::error::Result; + +/// SourcerSinker orchestrates data movement from the Source to the Sink via the optional SourceTransformer. +/// The forward-a-chunk executes the following in an infinite loop till a shutdown signal is received: +/// - Read X messages from the source +/// - Invokes the SourceTransformer concurrently +/// - Calls the Sinker to write the batch to the Sink +/// - Send Acknowledgement back to the Source +pub mod error; + +pub mod metrics; + +pub mod source; + +pub mod sink; + +pub mod transformer; + +pub mod forwarder; + +pub mod config; + +pub mod message; +pub(crate) mod shared; + +/// forwards a chunk of data from the source to the sink via an optional transformer. 
+/// It takes an optional custom_shutdown_rx for shutting down the forwarder, useful for testing. +pub async fn run_forwarder( + source_config: SourceConfig, + sink_config: SinkConfig, + transformer_config: Option, + custom_shutdown_rx: Option>, +) -> Result<()> { + wait_for_server_info(&source_config.server_info_file).await?; + let mut source_client = SourceClient::connect(source_config).await?; + + // start the lag reader to publish lag metrics + let mut lag_reader = metrics::LagReader::new(source_client.clone(), None, None); + lag_reader.start().await; + + wait_for_server_info(&sink_config.server_info_file).await?; + let mut sink_client = SinkClient::connect(sink_config).await?; + + let mut transformer_client = if let Some(config) = transformer_config { + wait_for_server_info(&config.server_info_file).await?; + Some(TransformerClient::connect(config).await?) + } else { + None + }; + + let (shutdown_tx, shutdown_rx) = oneshot::channel(); + + // readiness check for all the ud containers + wait_until_ready( + &mut source_client, + &mut sink_client, + &mut transformer_client, + ) + .await?; + + // TODO: use builder pattern of options like TIMEOUT, BATCH_SIZE, etc? 
+ let mut forwarder = + Forwarder::new(source_client, sink_client, transformer_client, shutdown_rx).await?; + + let forwarder_handle: JoinHandle> = tokio::spawn(async move { + forwarder.run().await?; + Ok(()) + }); + + let shutdown_handle: JoinHandle> = tokio::spawn(async move { + shutdown_signal(custom_shutdown_rx).await; + shutdown_tx + .send(()) + .map_err(|_| Error::ForwarderError("Failed to send shutdown signal".to_string()))?; + Ok(()) + }); + + forwarder_handle + .await + .unwrap_or_else(|e| { + error!("Forwarder task panicked: {:?}", e); + Err(Error::ForwarderError("Forwarder task panicked".to_string())) + }) + .unwrap_or_else(|e| { + error!("Forwarder failed: {:?}", e); + }); + + if !shutdown_handle.is_finished() { + shutdown_handle.abort(); + } + + lag_reader.shutdown().await; + info!("Forwarder stopped gracefully"); + Ok(()) +} + +async fn wait_for_server_info(file_path: &str) -> Result<()> { + loop { + if let Ok(metadata) = fs::metadata(file_path) { + if metadata.len() > 0 { + return Ok(()); + } + } + info!("Server info file {} is not ready, waiting...", file_path); + sleep(Duration::from_secs(1)).await; + } +} + +async fn wait_until_ready( + source_client: &mut SourceClient, + sink_client: &mut SinkClient, + transformer_client: &mut Option, +) -> Result<()> { + loop { + let source_ready = source_client.is_ready().await.is_ok(); + if !source_ready { + info!("UDSource is not ready, waiting..."); + } + + let sink_ready = sink_client.is_ready().await.is_ok(); + if !sink_ready { + info!("UDSink is not ready, waiting..."); + } + + let transformer_ready = if let Some(client) = transformer_client { + let ready = client.is_ready().await.is_ok(); + if !ready { + info!("UDTransformer is not ready, waiting..."); + } + ready + } else { + true + }; + + if source_ready && sink_ready && transformer_ready { + break; + } + + sleep(Duration::from_secs(1)).await; + } + + Ok(()) +} + +async fn shutdown_signal(shutdown_rx: Option>) { + let ctrl_c = async { + 
signal::ctrl_c() + .await + .expect("failed to install Ctrl+C handler"); + info!("Received Ctrl+C signal"); + }; + + let terminate = async { + signal::unix::signal(signal::unix::SignalKind::terminate()) + .expect("failed to install signal handler") + .recv() + .await; + info!("Received terminate signal"); + }; + + let custom_shutdown = async { + if let Some(rx) = shutdown_rx { + rx.await.ok(); + } else { + // Create a watch channel that never sends + let (_tx, mut rx) = tokio::sync::watch::channel(()); + rx.changed().await.ok(); + } + info!("Received custom shutdown signal"); + }; + + tokio::select! { + _ = ctrl_c => {}, + _ = terminate => {}, + _ = custom_shutdown => {}, + } +} + +#[cfg(test)] +mod tests { + use std::env; + + use numaflow::source::{Message, Offset, SourceReadRequest}; + use numaflow::{sink, source}; + use tokio::sync::mpsc::Sender; + + use crate::sink::SinkConfig; + use crate::source::SourceConfig; + + struct SimpleSource; + #[tonic::async_trait] + impl source::Sourcer for SimpleSource { + async fn read(&self, _: SourceReadRequest, _: Sender) {} + + async fn ack(&self, _: Vec) {} + + async fn pending(&self) -> usize { + 0 + } + + async fn partitions(&self) -> Option> { + None + } + } + + struct SimpleSink; + + #[tonic::async_trait] + impl sink::Sinker for SimpleSink { + async fn sink( + &self, + _input: tokio::sync::mpsc::Receiver, + ) -> Vec { + vec![] + } + } + #[tokio::test] + async fn run_forwarder() { + let (src_shutdown_tx, src_shutdown_rx) = tokio::sync::oneshot::channel(); + let tmp_dir = tempfile::TempDir::new().unwrap(); + let src_sock_file = tmp_dir.path().join("source.sock"); + let src_info_file = tmp_dir.path().join("source-server-info"); + + let server_info = src_info_file.clone(); + let server_socket = src_sock_file.clone(); + let src_server_handle = tokio::spawn(async move { + source::Server::new(SimpleSource) + .with_socket_file(server_socket) + .with_server_info_file(server_info) + .start_with_shutdown(src_shutdown_rx) + .await + 
.unwrap(); + }); + let source_config = SourceConfig { + socket_path: src_sock_file.to_str().unwrap().to_string(), + server_info_file: src_info_file.to_str().unwrap().to_string(), + max_message_size: 100, + }; + + let (sink_shutdown_tx, sink_shutdown_rx) = tokio::sync::oneshot::channel(); + let tmp_dir = tempfile::TempDir::new().unwrap(); + let sink_sock_file = tmp_dir.path().join("sink.sock"); + let sink_server_info = tmp_dir.path().join("sink-server-info"); + + let server_socket = sink_sock_file.clone(); + let server_info = sink_server_info.clone(); + let sink_server_handle = tokio::spawn(async move { + sink::Server::new(SimpleSink) + .with_socket_file(server_socket) + .with_server_info_file(server_info) + .start_with_shutdown(sink_shutdown_rx) + .await + .unwrap(); + }); + let sink_config = SinkConfig { + socket_path: sink_sock_file.to_str().unwrap().to_string(), + server_info_file: sink_server_info.to_str().unwrap().to_string(), + max_message_size: 100, + }; + + // wait for the servers to start + // FIXME: we need to have a better way, this is flaky + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + + env::set_var("SOURCE_SOCKET", src_sock_file.to_str().unwrap()); + env::set_var("SINK_SOCKET", sink_sock_file.to_str().unwrap()); + + let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel(); + + let forwarder_handle = tokio::spawn(async move { + let result = + super::run_forwarder(source_config, sink_config, None, Some(shutdown_rx)).await; + assert!(result.is_ok()); + }); + + // wait for the forwarder to start + // FIXME: we need to have a better way, this is flaky + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + + // stop the forwarder + shutdown_tx.send(()).unwrap(); + forwarder_handle.await.unwrap(); + + // stop the source and sink servers + src_shutdown_tx.send(()).unwrap(); + sink_shutdown_tx.send(()).unwrap(); + + src_server_handle.await.unwrap(); + sink_server_handle.await.unwrap(); + } +} diff --git 
a/serving/source-sink/src/main.rs b/serving/source-sink/src/main.rs new file mode 100644 index 0000000000..aa1d8c0605 --- /dev/null +++ b/serving/source-sink/src/main.rs @@ -0,0 +1,64 @@ +use log::Level::Info; +use sourcer_sinker::config::config; +use sourcer_sinker::sink::SinkConfig; +use sourcer_sinker::source::SourceConfig; +use sourcer_sinker::transformer::TransformerConfig; +use sourcer_sinker::run_forwarder; +use std::env; +use std::net::SocketAddr; +use tracing::error; +use tracing::level_filters::LevelFilter; +use tracing_subscriber::EnvFilter; +use sourcer_sinker::metrics::start_metrics_https_server; + +#[tokio::main] +async fn main() { + let log_level = env::var("NUMAFLOW_DEBUG").unwrap_or_else(|_| Info.to_string()); + // Initialize the logger + tracing_subscriber::fmt() + .with_env_filter( + EnvFilter::builder() + .with_default_directive(LevelFilter::INFO.into()) + .parse_lossy(log_level), + ) + .with_target(false) + .init(); + + // Start the metrics server, which server the prometheus metrics. + // TODO: make the port configurable. + let metrics_addr: SocketAddr = "0.0.0.0:2469".parse().expect("Invalid address"); + + // Start the metrics server in a separate background async spawn, + // This should be running throughout the lifetime of the application, hence the handle is not + // joined. + tokio::spawn(async move { + if let Err(e) = start_metrics_https_server(metrics_addr).await { + error!("Metrics server error: {:?}", e); + } + }); + + // Initialize the source, sink and transformer configurations + // We are using the default configurations for now. 
+ let source_config = SourceConfig { + max_message_size: config().grpc_max_message_size, + ..Default::default() + }; + + let sink_config = SinkConfig { + max_message_size: config().grpc_max_message_size, + ..Default::default() + }; + let transformer_config = if config().is_transformer_enabled { + Some(TransformerConfig { + max_message_size: config().grpc_max_message_size, + ..Default::default() + }) + } else { + None + }; + + // Run the forwarder + if let Err(e) = run_forwarder(source_config, sink_config, transformer_config, None).await { + error!("Application error: {:?}", e); + } +} diff --git a/serving/source-sink/src/message.rs b/serving/source-sink/src/message.rs new file mode 100644 index 0000000000..1ca69e9878 --- /dev/null +++ b/serving/source-sink/src/message.rs @@ -0,0 +1,85 @@ +use std::collections::HashMap; + +use base64::engine::general_purpose::STANDARD as BASE64_STANDARD; +use base64::Engine; +use chrono::{DateTime, Utc}; + +use crate::error::Error; +use crate::shared::{prost_timestamp_from_utc, utc_from_timestamp}; +use crate::sink::proto; +use crate::source::proto::read_response; +use crate::transformer::proto::SourceTransformRequest; + +/// A message that is sent from the source to the sink. +#[derive(Debug, Clone)] +pub(crate) struct Message { + /// keys of the message + pub(crate) keys: Vec, + /// actual payload of the message + pub(crate) value: Vec, + /// offset of the message + pub(crate) offset: Offset, + /// event time of the message + pub(crate) event_time: DateTime, + /// headers of the message + pub(crate) headers: HashMap, +} + +/// Offset of the message which will be used to acknowledge the message. 
+#[derive(Debug, Clone)] +pub(crate) struct Offset { + /// unique identifier of the message + pub(crate) offset: String, + /// partition id of the message + pub(crate) partition_id: i32, +} + +/// Convert the [`Message`] to [`SourceTransformRequest`] +impl From for SourceTransformRequest { + fn from(message: Message) -> Self { + Self { + keys: message.keys, + value: message.value, + event_time: prost_timestamp_from_utc(message.event_time), + watermark: None, + headers: message.headers, + } + } +} + +/// Convert [`read_response::Result`] to [`Message`] +impl TryFrom for Message { + type Error = crate::Error; + + fn try_from(result: read_response::Result) -> Result { + let source_offset = match result.offset { + Some(o) => Offset { + offset: BASE64_STANDARD.encode(o.offset), + partition_id: o.partition_id, + }, + None => return Err(Error::SourceError("Offset not found".to_string())), + }; + + Ok(Message { + keys: result.keys, + value: result.payload, + offset: source_offset, + event_time: utc_from_timestamp(result.event_time), + headers: result.headers, + }) + } +} + +/// Convert [`Message`] to [`proto::SinkRequest`] +impl From for proto::SinkRequest { + fn from(message: Message) -> Self { + Self { + keys: message.keys, + value: message.value, + event_time: prost_timestamp_from_utc(message.event_time), + watermark: None, + id: format!("{}-{}", message.offset.partition_id, message.offset.offset), + headers: message.headers, + } + } +} diff --git a/serving/source-sink/src/metrics.rs b/serving/source-sink/src/metrics.rs new file mode 100644 index 0000000000..d257609c76 --- /dev/null +++ b/serving/source-sink/src/metrics.rs @@ -0,0 +1,332 @@ +use std::future::ready; +use std::net::SocketAddr; +use std::sync::Arc; +use std::time::Duration; + +use axum::http::StatusCode; +use axum::response::IntoResponse; +use axum::{routing::get, Router}; +use axum_server::tls_rustls::RustlsConfig; +use log::info; +use metrics::describe_counter; +use metrics_exporter_prometheus::{Matcher, 
PrometheusBuilder, PrometheusHandle}; +use rcgen::{CertifiedKey, generate_simple_self_signed}; +use tokio::net::{TcpListener, ToSocketAddrs}; +use tokio::sync::Mutex; +use tokio::task::JoinHandle; +use tokio::time; +use tokio_util::sync::CancellationToken; +use tracing::{debug, error}; + +use crate::error::Error; +use crate::source::SourceClient; + +// Define the labels for the metrics +pub const MONO_VERTEX_NAME: &str = "vertex"; +pub const REPLICA_LABEL: &str = "replica"; +pub const PARTITION_LABEL: &str = "partition_name"; +pub const VERTEX_TYPE_LABEL: &str = "vertex_type"; + +// Define the metrics +pub const FORWARDER_READ_TOTAL: &str = "forwarder_read_total"; +pub const FORWARDER_READ_BYTES_TOTAL: &str = "forwarder_read_bytes_total"; + +pub const FORWARDER_ACK_TOTAL: &str = "forwarder_ack_total"; +pub const FORWARDER_WRITE_TOTAL: &str = "forwarder_write_total"; + +/// Collect and emit prometheus metrics. +/// Metrics router and server +pub async fn start_metrics_http_server(addr: A) -> crate::Result<()> +where + A: ToSocketAddrs + std::fmt::Debug, +{ + // setup_metrics_recorder should only be invoked once + let recorder_handle = setup_metrics_recorder()?; + + let metrics_app = Router::new() + .route("/metrics", get(move || ready(recorder_handle.render()))) + .route("/livez", get(livez)) + .route("/readyz", get(readyz)) + .route("/sidecar-livez", get(sidecar_livez)); + + let listener = TcpListener::bind(&addr) + .await + .map_err(|e| Error::MetricsError(format!("Creating listener on {:?}: {}", addr, e)))?; + + debug!("metrics server started at addr: {:?}", addr); + + axum::serve(listener, metrics_app) + .await + .map_err(|e| Error::MetricsError(format!("Starting web server for metrics: {}", e)))?; + Ok(()) +} + +pub async fn start_metrics_https_server(addr: SocketAddr) -> crate::Result<()> +where +{ + let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); + + // Generate a self-signed certificate + let CertifiedKey { cert, key_pair } = 
generate_simple_self_signed(vec!["localhost".into()]) + .map_err(|e| Error::MetricsError(format!("Generating self-signed certificate: {}", e)))?; + + + let tls_config = RustlsConfig::from_pem(cert.pem().into(), key_pair.serialize_pem().into()) + .await + .map_err(|e| Error::MetricsError(format!("Creating tlsConfig from pem: {}", e)))?; + + // setup_metrics_recorder should only be invoked once + let recorder_handle = setup_metrics_recorder()?; + + let metrics_app = Router::new() + .route("/metrics", get(move || ready(recorder_handle.render()))) + .route("/livez", get(livez)) + .route("/readyz", get(readyz)) + .route("/sidecar-livez", get(sidecar_livez)); + + axum_server::bind_rustls(addr, tls_config) + .serve(metrics_app.into_make_service()) + .await + .map_err(|e| Error::MetricsError(format!("Starting web server for metrics: {}", e)))?; + + Ok(()) +} + +async fn livez() -> impl IntoResponse { + StatusCode::NO_CONTENT +} + +async fn readyz() -> impl IntoResponse { + StatusCode::NO_CONTENT +} + +async fn sidecar_livez() -> impl IntoResponse { + StatusCode::NO_CONTENT +} + +/// setup the Prometheus metrics recorder. +fn setup_metrics_recorder() -> crate::Result { + // 1 micro-sec < t < 1000 seconds + let log_to_power_of_sqrt2_bins: [f64; 62] = (0..62) + .map(|i| 2_f64.sqrt().powf(i as f64)) + .collect::>() + .try_into() + .unwrap(); + + let prometheus_handle = PrometheusBuilder::new() + .set_buckets_for_metric( + Matcher::Full("fac_total_duration_micros".to_string()), // fac == forward-a-chunk + &log_to_power_of_sqrt2_bins, + ) + .map_err(|e| Error::MetricsError(format!("Prometheus install_recorder: {}", e)))? 
+ .install_recorder() + .map_err(|e| Error::MetricsError(format!("Prometheus install_recorder: {}", e)))?; + + // Define forwarder metrics + describe_counter!( + FORWARDER_READ_TOTAL, + "Total number of Data Messages Read in the forwarder" + ); + describe_counter!( + FORWARDER_READ_BYTES_TOTAL, + "Total number of bytes read in the forwarder" + ); + describe_counter!( + FORWARDER_ACK_TOTAL, + "Total number of acknowledgments by the forwarder" + ); + describe_counter!( + FORWARDER_WRITE_TOTAL, + "Total number of Data Messages written by the forwarder" + ); + Ok(prometheus_handle) +} + +const MAX_PENDING_STATS: usize = 1800; + +// Pending info with timestamp +struct TimestampedPending { + pending: i64, + timestamp: std::time::Instant, +} + +/// `LagReader` is responsible for periodically checking the lag of the source client +/// and exposing the metrics. It maintains a list of pending stats and ensures that +/// only the most recent entries are kept. +pub(crate) struct LagReader { + source_client: SourceClient, + lag_checking_interval: Duration, + refresh_interval: Duration, + cancellation_token: CancellationToken, + buildup_handle: Option>, + expose_handle: Option>, + pending_stats: Arc>>, +} + +impl LagReader { + /// Creates a new `LagReader` instance. + pub(crate) fn new( + source_client: SourceClient, + lag_checking_interval: Option, + refresh_interval: Option, + ) -> Self { + Self { + source_client, + lag_checking_interval: lag_checking_interval.unwrap_or_else(|| Duration::from_secs(3)), + refresh_interval: refresh_interval.unwrap_or_else(|| Duration::from_secs(5)), + cancellation_token: CancellationToken::new(), + buildup_handle: None, + expose_handle: None, + pending_stats: Arc::new(Mutex::new(Vec::new())), + } + } + + /// Starts the lag reader by spawning tasks to build up pending info and expose pending metrics. + /// + /// This method spawns two asynchronous tasks: + /// - One to periodically check the lag and update the pending stats. 
+ /// - Another to periodically expose the pending metrics. + pub async fn start(&mut self) { + let token = self.cancellation_token.clone(); + let source_client = self.source_client.clone(); + let lag_checking_interval = self.lag_checking_interval; + let refresh_interval = self.refresh_interval; + let pending_stats = self.pending_stats.clone(); + + self.buildup_handle = Some(tokio::spawn(async move { + buildup_pending_info(source_client, token, lag_checking_interval, pending_stats).await; + })); + + let token = self.cancellation_token.clone(); + let pending_stats = self.pending_stats.clone(); + self.expose_handle = Some(tokio::spawn(async move { + expose_pending_metrics(token, refresh_interval, pending_stats).await; + })); + } + + /// Shuts down the lag reader by cancelling the tasks and waiting for them to complete. + pub(crate) async fn shutdown(self) { + self.cancellation_token.cancel(); + if let Some(handle) = self.buildup_handle { + let _ = handle.await; + } + if let Some(handle) = self.expose_handle { + let _ = handle.await; + } + } +} + +// Periodically checks the pending messages from the source client and updates the pending stats. +async fn buildup_pending_info( + mut source_client: SourceClient, + cancellation_token: CancellationToken, + lag_checking_interval: Duration, + pending_stats: Arc>>, +) { + let mut ticker = time::interval(lag_checking_interval); + loop { + tokio::select! 
{ + _ = cancellation_token.cancelled() => { + return; + } + _ = ticker.tick() => { + match source_client.pending_fn().await { + Ok(pending) => { + if pending != -1 { + let mut stats = pending_stats.lock().await; + stats.push(TimestampedPending { + pending, + timestamp: std::time::Instant::now(), + }); + let n = stats.len(); + // Ensure only the most recent MAX_PENDING_STATS entries are kept + if n > MAX_PENDING_STATS { + stats.drain(0..(n - MAX_PENDING_STATS)); + } + } + } + Err(err) => { + error!("Failed to get pending messages: {:?}", err); + } + } + } + } + } +} + +// Periodically exposes the pending metrics by calculating the average pending messages over different intervals. +async fn expose_pending_metrics( + cancellation_token: CancellationToken, + refresh_interval: Duration, + pending_stats: Arc>>, +) { + let mut ticker = time::interval(refresh_interval); + let lookback_seconds_map = vec![("1m", 60), ("5m", 300), ("15m", 900)]; + loop { + tokio::select! { + _ = cancellation_token.cancelled() => { + return; + } + _ = ticker.tick() => { + for (label, seconds) in &lookback_seconds_map { + let pending = calculate_pending(*seconds, &pending_stats).await; + if pending != -1 { + // TODO: emit it as a metric + info!("Pending messages ({}): {}", label, pending); + } + } + } + } + } +} + +// Calculate the average pending messages over the last `seconds` seconds. 
+async fn calculate_pending( + seconds: i64, + pending_stats: &Arc>>, +) -> i64 { + let mut result = -1; + let mut total = 0; + let mut num = 0; + let now = std::time::Instant::now(); + + let stats = pending_stats.lock().await; + for item in stats.iter().rev() { + if now.duration_since(item.timestamp).as_secs() < seconds as u64 { + total += item.pending; + num += 1; + } else { + break; + } + } + + if num > 0 { + result = total / num; + } + + result +} +#[cfg(test)] +mod tests { + use std::net::SocketAddr; + use std::time::Duration; + + use tokio::time::sleep; + + use super::*; + + #[tokio::test] + async fn test_start_metrics_server() { + let addr = SocketAddr::from(([127, 0, 0, 1], 0)); + let server = tokio::spawn(async move { + let result = start_metrics_http_server(addr).await; + assert!(result.is_ok()) + }); + + // Give the server a little bit of time to start + sleep(Duration::from_millis(100)).await; + + // Stop the server + server.abort(); + } +} diff --git a/serving/source-sink/src/shared.rs b/serving/source-sink/src/shared.rs new file mode 100644 index 0000000000..2c63244647 --- /dev/null +++ b/serving/source-sink/src/shared.rs @@ -0,0 +1,38 @@ +use std::path::PathBuf; + +use chrono::{DateTime, TimeZone, Timelike, Utc}; +use prost_types::Timestamp; +use tokio::net::UnixStream; +use tonic::transport::{Channel, Endpoint, Uri}; +use tower::service_fn; + +use crate::error::Error; + +pub(crate) fn utc_from_timestamp(t: Option) -> DateTime { + t.map_or(Utc.timestamp_nanos(-1), |t| { + DateTime::from_timestamp(t.seconds, t.nanos as u32).unwrap_or(Utc.timestamp_nanos(-1)) + }) +} + +pub(crate) fn prost_timestamp_from_utc(t: DateTime) -> Option { + Some(Timestamp { + seconds: t.timestamp(), + nanos: t.nanosecond() as i32, + }) +} + +pub(crate) async fn connect_with_uds(uds_path: PathBuf) -> Result { + let channel = Endpoint::try_from("http://[::]:50051") + .map_err(|e| Error::ConnectionError(format!("Failed to create endpoint: {:?}", e)))? 
+ .connect_with_connector(service_fn(move |_: Uri| { + let uds_socket = uds_path.clone(); + async move { + Ok::<_, std::io::Error>(hyper_util::rt::TokioIo::new( + UnixStream::connect(uds_socket).await?, + )) + } + })) + .await + .map_err(|e| Error::ConnectionError(format!("Failed to connect: {:?}", e)))?; + Ok(channel) +} diff --git a/serving/source-sink/src/sink.rs b/serving/source-sink/src/sink.rs new file mode 100644 index 0000000000..e2801873df --- /dev/null +++ b/serving/source-sink/src/sink.rs @@ -0,0 +1,174 @@ +use tonic::transport::Channel; +use tonic::Request; + +use crate::error::Result; +use crate::message::Message; +use crate::shared::connect_with_uds; + +pub mod proto { + tonic::include_proto!("sink.v1"); +} + +const SINK_SOCKET: &str = "/var/run/numaflow/sink.sock"; +const SINK_SERVER_INFO_FILE: &str = "/var/run/numaflow/sinker-server-info"; + +/// SinkConfig is the configuration for the sink server. +#[derive(Debug, Clone)] +pub struct SinkConfig { + pub socket_path: String, + pub server_info_file: String, + pub max_message_size: usize, +} + +impl Default for SinkConfig { + fn default() -> Self { + SinkConfig { + socket_path: SINK_SOCKET.to_string(), + server_info_file: SINK_SERVER_INFO_FILE.to_string(), + max_message_size: 64 * 1024 * 1024, // 64 MB + } + } +} + +/// SinkClient is a client to interact with the sink server. 
+pub struct SinkClient { + client: proto::sink_client::SinkClient, +} + +impl SinkClient { + pub(crate) async fn connect(config: SinkConfig) -> Result { + let channel = connect_with_uds(config.socket_path.into()).await?; + let client = proto::sink_client::SinkClient::new(channel) + .max_decoding_message_size(config.max_message_size) + .max_encoding_message_size(config.max_message_size); + Ok(Self { client }) + } + + pub(crate) async fn sink_fn(&mut self, messages: Vec) -> Result { + let requests: Vec = + messages.into_iter().map(|message| message.into()).collect(); + + let (tx, rx) = tokio::sync::mpsc::channel(1); + + tokio::spawn(async move { + for request in requests { + if tx.send(request).await.is_err() { + break; + } + } + }); + + // TODO: retry for response with failure status + let response = self + .client + .sink_fn(tokio_stream::wrappers::ReceiverStream::new(rx)) + .await? + .into_inner(); + Ok(response) + } + + pub(crate) async fn is_ready(&mut self) -> Result { + let request = Request::new(()); + let response = self.client.is_ready(request).await?.into_inner(); + Ok(response) + } +} + +#[cfg(test)] +mod tests { + use chrono::offset::Utc; + use log::info; + use numaflow::sink; + + use crate::message::Offset; + + use super::*; + + struct Logger; + #[tonic::async_trait] + impl sink::Sinker for Logger { + async fn sink( + &self, + mut input: tokio::sync::mpsc::Receiver, + ) -> Vec { + let mut responses: Vec = Vec::new(); + while let Some(datum) = input.recv().await { + let response = match std::str::from_utf8(&datum.value) { + Ok(v) => { + info!("{}", v); + sink::Response::ok(datum.id) + } + Err(e) => { + sink::Response::failure(datum.id, format!("Invalid UTF-8 sequence: {}", e)) + } + }; + responses.push(response); + } + responses + } + } + #[tokio::test] + async fn sink_operations() { + // start the server + let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel(); + let tmp_dir = tempfile::TempDir::new().unwrap(); + let sock_file = 
tmp_dir.path().join("sink.sock"); + let server_info_file = tmp_dir.path().join("sink-server-info"); + + let server_info = server_info_file.clone(); + let server_socket = sock_file.clone(); + let server_handle = tokio::spawn(async move { + sink::Server::new(Logger) + .with_socket_file(server_socket) + .with_server_info_file(server_info) + .start_with_shutdown(shutdown_rx) + .await + .unwrap(); + }); + + // wait for the server to start + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + + let mut sink_client = SinkClient::connect(SinkConfig { + socket_path: sock_file.to_str().unwrap().to_string(), + server_info_file: server_info_file.to_str().unwrap().to_string(), + max_message_size: 4 * 1024 * 1024, + }) + .await + .expect("failed to connect to sink server"); + + let messages = vec![ + Message { + keys: vec![], + value: b"Hello, World!".to_vec(), + offset: Offset { + offset: "1".to_string(), + partition_id: 0, + }, + event_time: Utc::now(), + headers: Default::default(), + }, + Message { + keys: vec![], + value: b"Hello, World!".to_vec(), + offset: Offset { + offset: "2".to_string(), + partition_id: 0, + }, + event_time: Utc::now(), + headers: Default::default(), + }, + ]; + + let ready_response = sink_client.is_ready().await.unwrap(); + assert_eq!(ready_response.ready, true); + + let response = sink_client.sink_fn(messages).await.unwrap(); + assert_eq!(response.results.len(), 2); + + shutdown_tx + .send(()) + .expect("failed to send shutdown signal"); + server_handle.await.expect("failed to join server task"); + } +} diff --git a/serving/source-sink/src/source.rs b/serving/source-sink/src/source.rs new file mode 100644 index 0000000000..3c164bb5e2 --- /dev/null +++ b/serving/source-sink/src/source.rs @@ -0,0 +1,249 @@ +use base64::prelude::BASE64_STANDARD; +use base64::Engine; +use tokio_stream::StreamExt; +use tonic::transport::Channel; +use tonic::Request; + +use crate::error::{Error, Result}; +use crate::message::{Message, Offset}; +use 
crate::shared::connect_with_uds; + +pub mod proto { + tonic::include_proto!("source.v1"); +} + +const SOURCE_SOCKET: &str = "/var/run/numaflow/source.sock"; +const SOURCE_SERVER_INFO_FILE: &str = "/var/run/numaflow/sourcer-server-info"; + +/// SourceConfig is the configuration for the source server. +#[derive(Debug, Clone)] +pub struct SourceConfig { + pub socket_path: String, + pub server_info_file: String, + pub max_message_size: usize, +} + +impl Default for SourceConfig { + fn default() -> Self { + SourceConfig { + socket_path: SOURCE_SOCKET.to_string(), + server_info_file: SOURCE_SERVER_INFO_FILE.to_string(), + max_message_size: 64 * 1024 * 1024, // 64 MB + } + } +} + +/// SourceClient is a client to interact with the source server. +#[derive(Debug, Clone)] +pub(crate) struct SourceClient { + client: proto::source_client::SourceClient, +} + +impl SourceClient { + pub(crate) async fn connect(config: SourceConfig) -> Result { + let channel = connect_with_uds(config.socket_path.into()).await?; + let client = proto::source_client::SourceClient::new(channel) + .max_encoding_message_size(config.max_message_size) + .max_decoding_message_size(config.max_message_size); + + Ok(Self { client }) + } + + pub(crate) async fn read_fn( + &mut self, + num_records: u64, + timeout_in_ms: u32, + ) -> Result> { + let request = Request::new(proto::ReadRequest { + request: Some(proto::read_request::Request { + num_records, + timeout_in_ms, + }), + }); + + let mut stream = self.client.read_fn(request).await?.into_inner(); + let mut messages = Vec::with_capacity(num_records as usize); + + while let Some(response) = stream.next().await { + let result = response? 
+ .result + .ok_or_else(|| Error::SourceError("Empty message".to_string()))?; + + messages.push(result.try_into()?); + } + + Ok(messages) + } + + pub(crate) async fn ack_fn(&mut self, offsets: Vec) -> Result { + let offsets = offsets + .into_iter() + .map(|offset| proto::Offset { + offset: BASE64_STANDARD + .decode(offset.offset) + .expect("we control the encoding, so this should never fail"), + partition_id: offset.partition_id, + }) + .collect(); + + let request = Request::new(proto::AckRequest { + request: Some(proto::ack_request::Request { offsets }), + }); + + Ok(self.client.ack_fn(request).await?.into_inner()) + } + + #[allow(dead_code)] + // TODO: remove dead_code + pub(crate) async fn pending_fn(&mut self) -> Result { + let request = Request::new(()); + let response = self + .client + .pending_fn(request) + .await? + .into_inner() + .result + .map_or(0, |r| r.count); + Ok(response) + } + + #[allow(dead_code)] + // TODO: remove dead_code + pub(crate) async fn partitions_fn(&mut self) -> Result> { + let request = Request::new(()); + let response = self.client.partitions_fn(request).await?.into_inner(); + Ok(response.result.map_or(vec![], |r| r.partitions)) + } + + pub(crate) async fn is_ready(&mut self) -> Result { + let request = Request::new(()); + let response = self.client.is_ready(request).await?.into_inner(); + Ok(response) + } +} + +#[cfg(test)] +mod tests { + use std::collections::HashSet; + use std::error::Error; + + use chrono::Utc; + use numaflow::source; + use numaflow::source::{Message, Offset, SourceReadRequest}; + use tokio::sync::mpsc::Sender; + + use crate::source::{SourceClient, SourceConfig}; + + struct SimpleSource { + num: usize, + yet_to_ack: std::sync::RwLock>, + } + + impl SimpleSource { + fn new(num: usize) -> Self { + Self { + num, + yet_to_ack: std::sync::RwLock::new(HashSet::new()), + } + } + } + + #[tonic::async_trait] + impl source::Sourcer for SimpleSource { + async fn read(&self, request: SourceReadRequest, transmitter: Sender) 
{ + let event_time = Utc::now(); + let mut message_offsets = Vec::with_capacity(request.count); + for i in 0..request.count { + let offset = format!("{}-{}", event_time.timestamp_nanos_opt().unwrap(), i); + transmitter + .send(Message { + value: self.num.to_le_bytes().to_vec(), + event_time, + offset: Offset { + offset: offset.clone().into_bytes(), + partition_id: 0, + }, + keys: vec![], + headers: Default::default(), + }) + .await + .unwrap(); + message_offsets.push(offset) + } + self.yet_to_ack.write().unwrap().extend(message_offsets) + } + + async fn ack(&self, offsets: Vec) { + for offset in offsets { + self.yet_to_ack + .write() + .unwrap() + .remove(&String::from_utf8(offset.offset).unwrap()); + } + } + + async fn pending(&self) -> usize { + self.yet_to_ack.read().unwrap().len() + } + + async fn partitions(&self) -> Option> { + Some(vec![2]) + } + } + + #[tokio::test] + async fn source_operations() -> Result<(), Box> { + // start the server + let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel(); + let tmp_dir = tempfile::TempDir::new().unwrap(); + let sock_file = tmp_dir.path().join("source.sock"); + let server_info_file = tmp_dir.path().join("source-server-info"); + + let server_info = server_info_file.clone(); + let server_socket = sock_file.clone(); + let server_handle = tokio::spawn(async move { + source::Server::new(SimpleSource::new(10)) + .with_socket_file(server_socket) + .with_server_info_file(server_info) + .start_with_shutdown(shutdown_rx) + .await + .unwrap(); + }); + + // wait for the server to start + // TODO: flaky + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + + let mut source_client = SourceClient::connect(SourceConfig { + socket_path: sock_file.to_str().unwrap().to_string(), + server_info_file: server_info_file.to_str().unwrap().to_string(), + max_message_size: 4 * 1024 * 1024, + }) + .await + .expect("failed to connect to source server"); + + let response = source_client.is_ready().await.unwrap(); + 
assert!(response.ready); + + let messages = source_client.read_fn(5, 1000).await.unwrap(); + assert_eq!(messages.len(), 5); + + let response = source_client + .ack_fn(messages.iter().map(|m| m.offset.clone()).collect()) + .await + .unwrap(); + assert!(response.result.unwrap().success.is_some()); + + let pending = source_client.pending_fn().await.unwrap(); + assert_eq!(pending, 0); + + let partitions = source_client.partitions_fn().await.unwrap(); + assert_eq!(partitions, vec![2]); + + shutdown_tx + .send(()) + .expect("failed to send shutdown signal"); + server_handle.await.expect("failed to join server task"); + Ok(()) + } +} diff --git a/serving/source-sink/src/transformer.rs b/serving/source-sink/src/transformer.rs new file mode 100644 index 0000000000..2bbca45bce --- /dev/null +++ b/serving/source-sink/src/transformer.rs @@ -0,0 +1,159 @@ +use tonic::transport::Channel; +use tonic::Request; + +use crate::error::Result; +use crate::message::Message; +use crate::shared::{connect_with_uds, utc_from_timestamp}; +use crate::transformer::proto::SourceTransformRequest; + +pub mod proto { + tonic::include_proto!("sourcetransformer.v1"); +} + +const TRANSFORMER_SOCKET: &str = "/var/run/numaflow/sourcetransform.sock"; +const TRANSFORMER_SERVER_INFO_FILE: &str = "/var/run/numaflow/sourcetransformer-server-info"; + +/// TransformerConfig is the configuration for the transformer server. +#[derive(Debug, Clone)] +pub struct TransformerConfig { + pub socket_path: String, + pub server_info_file: String, + pub max_message_size: usize, +} + +impl Default for TransformerConfig { + fn default() -> Self { + TransformerConfig { + socket_path: TRANSFORMER_SOCKET.to_string(), + server_info_file: TRANSFORMER_SERVER_INFO_FILE.to_string(), + max_message_size: 64 * 1024 * 1024, // 64 MB + } + } +} + +/// TransformerClient is a client to interact with the transformer server. 
+#[derive(Clone)] +pub struct TransformerClient { + client: proto::source_transform_client::SourceTransformClient, +} + +impl TransformerClient { + pub(crate) async fn connect(config: TransformerConfig) -> Result { + let channel = connect_with_uds(config.socket_path.into()).await?; + let client = proto::source_transform_client::SourceTransformClient::new(channel) + .max_decoding_message_size(config.max_message_size) + .max_encoding_message_size(config.max_message_size); + Ok(Self { client }) + } + + pub(crate) async fn transform_fn(&mut self, message: Message) -> Result> { + // fields which will not be changed + let offset = message.offset.clone(); + let headers = message.headers.clone(); + + // TODO: is this complex? the reason to do this is, tomorrow when we have the normal + // Pipeline CRD, we can require the Into trait. + let response = self + .client + .source_transform_fn(>::into(message)) + .await? + .into_inner(); + + let mut messages = Vec::new(); + for result in response.results { + let message = Message { + keys: result.keys, + value: result.value, + offset: offset.clone(), + event_time: utc_from_timestamp(result.event_time), + headers: headers.clone(), + }; + messages.push(message); + } + + Ok(messages) + } + + pub(crate) async fn is_ready(&mut self) -> Result { + let request = Request::new(()); + let response = self.client.is_ready(request).await?.into_inner(); + Ok(response) + } +} + +#[cfg(test)] +mod tests { + use std::error::Error; + + use numaflow::sourcetransform; + use tempfile::TempDir; + + use crate::transformer::{TransformerClient, TransformerConfig}; + + struct NowCat; + + #[tonic::async_trait] + impl sourcetransform::SourceTransformer for NowCat { + async fn transform( + &self, + input: sourcetransform::SourceTransformRequest, + ) -> Vec { + let message = sourcetransform::Message::new(input.value, chrono::offset::Utc::now()) + .keys(input.keys) + .tags(vec![]); + vec![message] + } + } + + #[tokio::test] + async fn transformer_operations() 
-> Result<(), Box> { + let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel(); + let tmp_dir = TempDir::new()?; + let sock_file = tmp_dir.path().join("sourcetransform.sock"); + let server_info_file = tmp_dir.path().join("sourcetransformer-server-info"); + + let server_info = server_info_file.clone(); + let server_socket = sock_file.clone(); + let handle = tokio::spawn(async move { + sourcetransform::Server::new(NowCat) + .with_socket_file(server_socket) + .with_server_info_file(server_info) + .start_with_shutdown(shutdown_rx) + .await + .expect("server failed"); + }); + + // wait for the server to start + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + + let mut client = TransformerClient::connect(TransformerConfig { + socket_path: sock_file.to_str().unwrap().to_string(), + server_info_file: server_info_file.to_str().unwrap().to_string(), + max_message_size: 4 * 1024 * 1024, + }) + .await?; + + let message = crate::message::Message { + keys: vec!["first".into(), "second".into()], + value: "hello".into(), + offset: crate::message::Offset { + partition_id: 0, + offset: "0".into(), + }, + event_time: chrono::Utc::now(), + headers: Default::default(), + }; + + let resp = client.is_ready().await?; + assert_eq!(resp.ready, true); + + let resp = client.transform_fn(message).await?; + assert_eq!(resp.len(), 1); + + shutdown_tx + .send(()) + .expect("failed to send shutdown signal"); + handle.await.expect("failed to join server task"); + Ok(()) + } +} From e1204956ca69b36fc56515c95491ef446a95eba5 Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Fri, 9 Aug 2024 21:36:34 -0700 Subject: [PATCH 12/23] chore: mono-vertex code review (#1917) Signed-off-by: Vigith Maurice --- serving/Cargo.lock | 1 - serving/source-sink/Cargo.toml | 1 - serving/source-sink/src/config.rs | 22 ++++++++------- serving/source-sink/src/main.rs | 11 ++++---- serving/source-sink/src/metrics.rs | 43 +++++++++++++++--------------- serving/source-sink/src/sink.rs | 5 ++-- 6 
files changed, 43 insertions(+), 40 deletions(-) diff --git a/serving/Cargo.lock b/serving/Cargo.lock index 0e70179df8..e765a7d9a6 100644 --- a/serving/Cargo.lock +++ b/serving/Cargo.lock @@ -2743,7 +2743,6 @@ dependencies = [ "bytes", "chrono", "hyper-util", - "log", "metrics", "metrics-exporter-prometheus", "numaflow", diff --git a/serving/source-sink/Cargo.toml b/serving/source-sink/Cargo.toml index 9db2bbff35..ab1f4dcd1d 100644 --- a/serving/source-sink/Cargo.toml +++ b/serving/source-sink/Cargo.toml @@ -19,7 +19,6 @@ chrono = "0.4.31" base64 = "0.22.1" metrics = { version = "0.23.0", default-features = false } metrics-exporter-prometheus = { version = "0.15.3", default-features = false } -log = "0.4.22" tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } hyper-util = "0.1.6" tower = "0.4.13" diff --git a/serving/source-sink/src/config.rs b/serving/source-sink/src/config.rs index 9ac27a3413..8adbc2691d 100644 --- a/serving/source-sink/src/config.rs +++ b/serving/source-sink/src/config.rs @@ -1,10 +1,10 @@ use crate::error::Error; use base64::prelude::BASE64_STANDARD; use base64::Engine; -use log::LevelFilter; use numaflow_models::models::MonoVertex; use std::env; use std::sync::OnceLock; +use tracing::level_filters::LevelFilter; const ENV_MONO_VERTEX_OBJ: &str = "NUMAFLOW_MONO_VERTEX_OBJECT"; const ENV_GRPC_MAX_MESSAGE_SIZE: &str = "NUMAFLOW_GRPC_MAX_MESSAGE_SIZE"; @@ -48,7 +48,7 @@ impl Default for Settings { batch_size: DEFAULT_BATCH_SIZE, timeout_in_ms: DEFAULT_TIMEOUT_IN_MS, metrics_server_listen_port: DEFAULT_METRICS_PORT, - log_level: "info".to_string(), + log_level: LevelFilter::INFO.to_string(), grpc_max_message_size: DEFAULT_GRPC_MAX_MESSAGE_SIZE, is_transformer_enabled: false, lag_check_interval_in_secs: DEFAULT_LAG_CHECK_INTERVAL_IN_SECS, @@ -105,7 +105,7 @@ impl Settings { } settings.log_level = - env::var(ENV_LOG_LEVEL).unwrap_or_else(|_| LevelFilter::Info.to_string()); + env::var(ENV_LOG_LEVEL).unwrap_or_else(|_| 
LevelFilter::INFO.to_string()); settings.grpc_max_message_size = env::var(ENV_GRPC_MAX_MESSAGE_SIZE) .unwrap_or_else(|_| DEFAULT_GRPC_MAX_MESSAGE_SIZE.to_string()) @@ -131,9 +131,11 @@ mod tests { #[test] fn test_settings_load() { // Set up environment variables - env::set_var(ENV_MONO_VERTEX_OBJ, "eyJtZXRhZGF0YSI6eyJuYW1lIjoic2ltcGxlLW1vbm8tdmVydGV4IiwibmFtZXNwYWNlIjoiZGVmYXVsdCIsImNyZWF0aW9uVGltZXN0YW1wIjpudWxsfSwic3BlYyI6eyJyZXBsaWNhcyI6MCwic291cmNlIjp7InRyYW5zZm9ybWVyIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6InF1YXkuaW8vbnVtYWlvL251bWFmbG93LXJzL21hcHQtZXZlbnQtdGltZS1maWx0ZXI6c3RhYmxlIiwicmVzb3VyY2VzIjp7fX0sImJ1aWx0aW4iOm51bGx9LCJ1ZHNvdXJjZSI6eyJjb250YWluZXIiOnsiaW1hZ2UiOiJkb2NrZXIuaW50dWl0LmNvbS9wZXJzb25hbC95aGwwMS9zaW1wbGUtc291cmNlOnN0YWJsZSIsInJlc291cmNlcyI6e319fX0sInNpbmsiOnsidWRzaW5rIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6ImRvY2tlci5pbnR1aXQuY29tL3BlcnNvbmFsL3lobDAxL2JsYWNraG9sZS1zaW5rOnN0YWJsZSIsInJlc291cmNlcyI6e319fX0sImxpbWl0cyI6eyJyZWFkQmF0Y2hTaXplIjo1MDAsInJlYWRUaW1lb3V0IjoiMXMifSwic2NhbGUiOnt9fSwic3RhdHVzIjp7InJlcGxpY2FzIjowLCJsYXN0VXBkYXRlZCI6bnVsbCwibGFzdFNjYWxlZEF0IjpudWxsfX0="); - env::set_var(ENV_LOG_LEVEL, "debug"); - env::set_var(ENV_GRPC_MAX_MESSAGE_SIZE, "128000000"); + unsafe { + env::set_var(ENV_MONO_VERTEX_OBJ, 
"eyJtZXRhZGF0YSI6eyJuYW1lIjoic2ltcGxlLW1vbm8tdmVydGV4IiwibmFtZXNwYWNlIjoiZGVmYXVsdCIsImNyZWF0aW9uVGltZXN0YW1wIjpudWxsfSwic3BlYyI6eyJyZXBsaWNhcyI6MCwic291cmNlIjp7InRyYW5zZm9ybWVyIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6InF1YXkuaW8vbnVtYWlvL251bWFmbG93LXJzL21hcHQtZXZlbnQtdGltZS1maWx0ZXI6c3RhYmxlIiwicmVzb3VyY2VzIjp7fX0sImJ1aWx0aW4iOm51bGx9LCJ1ZHNvdXJjZSI6eyJjb250YWluZXIiOnsiaW1hZ2UiOiJkb2NrZXIuaW50dWl0LmNvbS9wZXJzb25hbC95aGwwMS9zaW1wbGUtc291cmNlOnN0YWJsZSIsInJlc291cmNlcyI6e319fX0sInNpbmsiOnsidWRzaW5rIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6ImRvY2tlci5pbnR1aXQuY29tL3BlcnNvbmFsL3lobDAxL2JsYWNraG9sZS1zaW5rOnN0YWJsZSIsInJlc291cmNlcyI6e319fX0sImxpbWl0cyI6eyJyZWFkQmF0Y2hTaXplIjo1MDAsInJlYWRUaW1lb3V0IjoiMXMifSwic2NhbGUiOnt9fSwic3RhdHVzIjp7InJlcGxpY2FzIjowLCJsYXN0VXBkYXRlZCI6bnVsbCwibGFzdFNjYWxlZEF0IjpudWxsfX0="); + env::set_var(ENV_LOG_LEVEL, "debug"); + env::set_var(ENV_GRPC_MAX_MESSAGE_SIZE, "128000000"); + }; // Load settings let settings = Settings::load().unwrap(); @@ -146,8 +148,10 @@ mod tests { assert_eq!(settings.grpc_max_message_size, 128000000); // Clean up environment variables - env::remove_var(ENV_MONO_VERTEX_OBJ); - env::remove_var(ENV_LOG_LEVEL); - env::remove_var(ENV_GRPC_MAX_MESSAGE_SIZE); + unsafe { + env::remove_var(ENV_MONO_VERTEX_OBJ); + env::remove_var(ENV_LOG_LEVEL); + env::remove_var(ENV_GRPC_MAX_MESSAGE_SIZE); + }; } } diff --git a/serving/source-sink/src/main.rs b/serving/source-sink/src/main.rs index aa1d8c0605..0013e33613 100644 --- a/serving/source-sink/src/main.rs +++ b/serving/source-sink/src/main.rs @@ -1,19 +1,18 @@ -use log::Level::Info; use sourcer_sinker::config::config; +use sourcer_sinker::metrics::start_metrics_https_server; +use sourcer_sinker::run_forwarder; use sourcer_sinker::sink::SinkConfig; use sourcer_sinker::source::SourceConfig; use sourcer_sinker::transformer::TransformerConfig; -use sourcer_sinker::run_forwarder; use std::env; use std::net::SocketAddr; -use tracing::error; use tracing::level_filters::LevelFilter; +use tracing::{error, info}; 
use tracing_subscriber::EnvFilter; -use sourcer_sinker::metrics::start_metrics_https_server; #[tokio::main] async fn main() { - let log_level = env::var("NUMAFLOW_DEBUG").unwrap_or_else(|_| Info.to_string()); + let log_level = env::var("NUMAFLOW_DEBUG").unwrap_or_else(|_| LevelFilter::INFO.to_string()); // Initialize the logger tracing_subscriber::fmt() .with_env_filter( @@ -61,4 +60,6 @@ async fn main() { if let Err(e) = run_forwarder(source_config, sink_config, transformer_config, None).await { error!("Application error: {:?}", e); } + + info!("Gracefully Exiting..."); } diff --git a/serving/source-sink/src/metrics.rs b/serving/source-sink/src/metrics.rs index d257609c76..b33b5b2fb2 100644 --- a/serving/source-sink/src/metrics.rs +++ b/serving/source-sink/src/metrics.rs @@ -7,16 +7,15 @@ use axum::http::StatusCode; use axum::response::IntoResponse; use axum::{routing::get, Router}; use axum_server::tls_rustls::RustlsConfig; -use log::info; use metrics::describe_counter; use metrics_exporter_prometheus::{Matcher, PrometheusBuilder, PrometheusHandle}; -use rcgen::{CertifiedKey, generate_simple_self_signed}; +use rcgen::{generate_simple_self_signed, CertifiedKey}; use tokio::net::{TcpListener, ToSocketAddrs}; use tokio::sync::Mutex; use tokio::task::JoinHandle; use tokio::time; use tokio_util::sync::CancellationToken; -use tracing::{debug, error}; +use tracing::{debug, error, info}; use crate::error::Error; use crate::source::SourceClient; @@ -30,7 +29,6 @@ pub const VERTEX_TYPE_LABEL: &str = "vertex_type"; // Define the metrics pub const FORWARDER_READ_TOTAL: &str = "forwarder_read_total"; pub const FORWARDER_READ_BYTES_TOTAL: &str = "forwarder_read_bytes_total"; - pub const FORWARDER_ACK_TOTAL: &str = "forwarder_ack_total"; pub const FORWARDER_WRITE_TOTAL: &str = "forwarder_write_total"; @@ -43,11 +41,7 @@ where // setup_metrics_recorder should only be invoked once let recorder_handle = setup_metrics_recorder()?; - let metrics_app = Router::new() - 
.route("/metrics", get(move || ready(recorder_handle.render()))) - .route("/livez", get(livez)) - .route("/readyz", get(readyz)) - .route("/sidecar-livez", get(sidecar_livez)); + let metrics_app = metrics_router(recorder_handle); let listener = TcpListener::bind(&addr) .await @@ -70,7 +64,6 @@ where let CertifiedKey { cert, key_pair } = generate_simple_self_signed(vec!["localhost".into()]) .map_err(|e| Error::MetricsError(format!("Generating self-signed certificate: {}", e)))?; - let tls_config = RustlsConfig::from_pem(cert.pem().into(), key_pair.serialize_pem().into()) .await .map_err(|e| Error::MetricsError(format!("Creating tlsConfig from pem: {}", e)))?; @@ -78,11 +71,7 @@ where // setup_metrics_recorder should only be invoked once let recorder_handle = setup_metrics_recorder()?; - let metrics_app = Router::new() - .route("/metrics", get(move || ready(recorder_handle.render()))) - .route("/livez", get(livez)) - .route("/readyz", get(readyz)) - .route("/sidecar-livez", get(sidecar_livez)); + let metrics_app = metrics_router(recorder_handle); axum_server::bind_rustls(addr, tls_config) .serve(metrics_app.into_make_service()) @@ -92,6 +81,16 @@ where Ok(()) } +/// router for metrics and k8s health endpoints +fn metrics_router(recorder_handle: PrometheusHandle) -> Router { + let metrics_app = Router::new() + .route("/metrics", get(move || ready(recorder_handle.render()))) + .route("/livez", get(livez)) + .route("/readyz", get(readyz)) + .route("/sidecar-livez", get(sidecar_livez)); + metrics_app +} + async fn livez() -> impl IntoResponse { StatusCode::NO_CONTENT } @@ -139,6 +138,7 @@ fn setup_metrics_recorder() -> crate::Result { FORWARDER_WRITE_TOTAL, "Total number of Data Messages written by the forwarder" ); + Ok(prometheus_handle) } @@ -177,7 +177,7 @@ impl LagReader { cancellation_token: CancellationToken::new(), buildup_handle: None, expose_handle: None, - pending_stats: Arc::new(Mutex::new(Vec::new())), + pending_stats: 
Arc::new(Mutex::new(Vec::with_capacity(MAX_PENDING_STATS))), } } @@ -194,7 +194,7 @@ impl LagReader { let pending_stats = self.pending_stats.clone(); self.buildup_handle = Some(tokio::spawn(async move { - buildup_pending_info(source_client, token, lag_checking_interval, pending_stats).await; + build_pending_info(source_client, token, lag_checking_interval, pending_stats).await; })); let token = self.cancellation_token.clone(); @@ -216,8 +216,8 @@ impl LagReader { } } -// Periodically checks the pending messages from the source client and updates the pending stats. -async fn buildup_pending_info( +/// Periodically checks the pending messages from the source client and build the pending stats. +async fn build_pending_info( mut source_client: SourceClient, cancellation_token: CancellationToken, lag_checking_interval: Duration, @@ -240,7 +240,7 @@ async fn buildup_pending_info( }); let n = stats.len(); // Ensure only the most recent MAX_PENDING_STATS entries are kept - if n > MAX_PENDING_STATS { + if n >= MAX_PENDING_STATS { stats.drain(0..(n - MAX_PENDING_STATS)); } } @@ -280,7 +280,7 @@ async fn expose_pending_metrics( } } -// Calculate the average pending messages over the last `seconds` seconds. +/// Calculate the average pending messages over the last `seconds` seconds. 
async fn calculate_pending( seconds: i64, pending_stats: &Arc>>, @@ -306,6 +306,7 @@ async fn calculate_pending( result } + #[cfg(test)] mod tests { use std::net::SocketAddr; diff --git a/serving/source-sink/src/sink.rs b/serving/source-sink/src/sink.rs index e2801873df..6312287338 100644 --- a/serving/source-sink/src/sink.rs +++ b/serving/source-sink/src/sink.rs @@ -76,11 +76,10 @@ impl SinkClient { #[cfg(test)] mod tests { + use crate::message::Offset; use chrono::offset::Utc; - use log::info; use numaflow::sink; - - use crate::message::Offset; + use tracing::info; use super::*; From 858149379b465a3db35c173e8fd3f6f38e8388bc Mon Sep 17 00:00:00 2001 From: Sidhant Kohli Date: Fri, 9 Aug 2024 21:42:19 -0700 Subject: [PATCH 13/23] feat: add server-info support and versioning to MonoVertex (#1918) Signed-off-by: Sidhant Kohli Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- serving/Cargo.lock | 26 + serving/source-sink/Cargo.toml | 3 + serving/source-sink/src/error.rs | 3 + serving/source-sink/src/lib.rs | 38 +- serving/source-sink/src/server_info.rs | 695 +++++++++++++++++++++++++ 5 files changed, 748 insertions(+), 17 deletions(-) create mode 100644 serving/source-sink/src/server_info.rs diff --git a/serving/Cargo.lock b/serving/Cargo.lock index e765a7d9a6..4bb0fa7c0f 100644 --- a/serving/Cargo.lock +++ b/serving/Cargo.lock @@ -1840,6 +1840,17 @@ dependencies = [ "base64ct", ] +[[package]] +name = "pep440_rs" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "466eada3179c2e069ca897b99006cbb33f816290eaeec62464eea907e22ae385" +dependencies = [ + "once_cell", + "unicode-width", + "unscanny", +] + [[package]] name = "percent-encoding" version = "2.3.1" @@ -2748,10 +2759,13 @@ dependencies = [ "numaflow", "numaflow-models", "once_cell", + "pep440_rs", "prost", "prost-types", "rcgen", "rustls", + "semver", + "serde", "serde_json", "tempfile", "thiserror", @@ -3282,12 +3296,24 @@ version = "1.11.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" +[[package]] +name = "unicode-width" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" + [[package]] name = "unsafe-libyaml" version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" +[[package]] +name = "unscanny" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9df2af067a7953e9c3831320f35c1cc0600c30d44d9f7a12b01db1cd88d6b47" + [[package]] name = "untrusted" version = "0.9.0" diff --git a/serving/source-sink/Cargo.toml b/serving/source-sink/Cargo.toml index ab1f4dcd1d..26d4027da6 100644 --- a/serving/source-sink/Cargo.toml +++ b/serving/source-sink/Cargo.toml @@ -28,6 +28,9 @@ serde_json = "1.0.122" numaflow-models = { path = "../numaflow-models"} rcgen = "0.13.1" rustls = { version = "0.23.12", features = ["aws_lc_rs"] } +serde = { version = "1.0.204", features = ["derive"] } +semver = "1.0" +pep440_rs = "0.6.6" [dev-dependencies] tower = "0.4.13" diff --git a/serving/source-sink/src/error.rs b/serving/source-sink/src/error.rs index 76ae1ce590..50c5f87e2e 100644 --- a/serving/source-sink/src/error.rs +++ b/serving/source-sink/src/error.rs @@ -27,6 +27,9 @@ pub enum Error { #[error("Config Error - {0}")] ConfigError(String), + + #[error("ServerInfoError Error - {0}")] + ServerInfoError(String), } impl From for Error { diff --git a/serving/source-sink/src/lib.rs b/serving/source-sink/src/lib.rs index 2099bc63b9..35a4d15a1a 100644 --- a/serving/source-sink/src/lib.rs +++ b/serving/source-sink/src/lib.rs @@ -1,11 +1,10 @@ -use std::fs; use std::time::Duration; use tokio::signal; use tokio::sync::oneshot; use tokio::task::JoinHandle; use tokio::time::sleep; -use 
tracing::{error, info}; +use tracing::{error, info, warn}; pub(crate) use crate::error::Error; use crate::forwarder::Forwarder; @@ -36,6 +35,7 @@ pub mod forwarder; pub mod config; pub mod message; +mod server_info; pub(crate) mod shared; /// forwards a chunk of data from the source to the sink via an optional transformer. @@ -46,18 +46,34 @@ pub async fn run_forwarder( transformer_config: Option, custom_shutdown_rx: Option>, ) -> Result<()> { - wait_for_server_info(&source_config.server_info_file).await?; + server_info::check_for_server_compatibility(&source_config.server_info_file) + .await + .map_err(|e| { + warn!("Error waiting for source server info file: {:?}", e); + Error::ForwarderError("Error waiting for server info file".to_string()) + })?; let mut source_client = SourceClient::connect(source_config).await?; // start the lag reader to publish lag metrics let mut lag_reader = metrics::LagReader::new(source_client.clone(), None, None); lag_reader.start().await; - wait_for_server_info(&sink_config.server_info_file).await?; + server_info::check_for_server_compatibility(&sink_config.server_info_file) + .await + .map_err(|e| { + warn!("Error waiting for sink server info file: {:?}", e); + Error::ForwarderError("Error waiting for server info file".to_string()) + })?; + let mut sink_client = SinkClient::connect(sink_config).await?; let mut transformer_client = if let Some(config) = transformer_config { - wait_for_server_info(&config.server_info_file).await?; + server_info::check_for_server_compatibility(&config.server_info_file) + .await + .map_err(|e| { + warn!("Error waiting for transformer server info file: {:?}", e); + Error::ForwarderError("Error waiting for server info file".to_string()) + })?; Some(TransformerClient::connect(config).await?) 
} else { None @@ -109,18 +125,6 @@ pub async fn run_forwarder( Ok(()) } -async fn wait_for_server_info(file_path: &str) -> Result<()> { - loop { - if let Ok(metadata) = fs::metadata(file_path) { - if metadata.len() > 0 { - return Ok(()); - } - } - info!("Server info file {} is not ready, waiting...", file_path); - sleep(Duration::from_secs(1)).await; - } -} - async fn wait_until_ready( source_client: &mut SourceClient, sink_client: &mut SinkClient, diff --git a/serving/source-sink/src/server_info.rs b/serving/source-sink/src/server_info.rs new file mode 100644 index 0000000000..fe841fe99e --- /dev/null +++ b/serving/source-sink/src/server_info.rs @@ -0,0 +1,695 @@ +use std::collections::HashMap; +use std::fs; +use std::str::FromStr; +use std::time::Duration; + +use pep440_rs::{Version as PepVersion, VersionSpecifier}; +use semver::{Version, VersionReq}; +use serde::{Deserialize, Serialize}; +use tokio::time::sleep; +use tracing::{info, warn}; + +use crate::error; +use crate::error::Error; +use crate::server_info::version::SdkConstraints; + +// Constant to represent the end of the server info. +// Equivalent to U+005C__END__. +const END: &str = "U+005C__END__"; + +/// ServerInfo structure to store server-related information +#[derive(Serialize, Deserialize, Debug)] +pub(crate) struct ServerInfo { + #[serde(default)] + protocol: String, + #[serde(default)] + language: String, + #[serde(default)] + minimum_numaflow_version: String, + #[serde(default)] + version: String, + #[serde(default)] + metadata: Option>, // Metadata is optional +} + +/// check_for_server_compatibility waits until the server info file is ready and check whether the +/// server is compatible with Numaflow. 
+pub async fn check_for_server_compatibility(file_path: &str) -> error::Result<()> { + // Read the server info file + let server_info = read_server_info(file_path).await?; + + // Log the server info + info!("Server info file: {:?}", server_info); + + // Extract relevant fields from server info + let sdk_version = &server_info.version; + let min_numaflow_version = &server_info.minimum_numaflow_version; + let sdk_language = &server_info.language; + // Get version information + let version_info = version::get_version_info(); + let numaflow_version = &version_info.version; + + info!("Version_info: {:?}", version_info); + + // Check minimum numaflow version compatibility if specified + if min_numaflow_version.is_empty() { + warn!("Failed to get the minimum numaflow version, skipping numaflow version compatibility check"); + } else if !numaflow_version.contains("latest") + && !numaflow_version.contains(&version_info.git_commit) + { + // Check the compatibility between the SDK and Numaflow versions + // If any error occurs, return the error + check_numaflow_compatibility(numaflow_version, min_numaflow_version)?; + } + + // Check SDK compatibility if version and language are specified + if sdk_version.is_empty() || sdk_language.is_empty() { + warn!("Failed to get the SDK version/language, skipping SDK version compatibility check"); + } else { + // Get minimum supported SDK versions and check compatibility + let min_supported_sdk_versions = version::get_minimum_supported_sdk_versions(); + check_sdk_compatibility(sdk_version, sdk_language, min_supported_sdk_versions)?; + } + + Ok(()) +} + +/// Checks if the given version meets the specified constraint. 
+fn check_constraint(version: &Version, constraint: &str) -> error::Result<()> { + // Parse the given constraint as a semantic version requirement + let version_req = VersionReq::parse(constraint).map_err(|e| { + Error::ServerInfoError(format!( + "Error parsing constraint: {},\ + constraint string: {}", + e, constraint + )) + })?; + + // Check if the provided version satisfies the parsed constraint + if !version_req.matches(version) { + return Err(Error::ServerInfoError("invalid version".to_string())); + } + + Ok(()) +} + +/// Checks if the current numaflow version is compatible with the given minimum numaflow version. +fn check_numaflow_compatibility( + numaflow_version: &str, + min_numaflow_version: &str, +) -> error::Result<()> { + // Ensure that the minimum numaflow version is specified + if min_numaflow_version.is_empty() { + return Err(Error::ServerInfoError("invalid version".to_string())); + } + + // Parse the provided numaflow version as a semantic version + let numaflow_version_semver = Version::parse(numaflow_version) + .map_err(|e| Error::ServerInfoError(format!("Error parsing Numaflow version: {}", e)))?; + + // Create a version constraint based on the minimum numaflow version + let numaflow_constraint = format!(">={}", min_numaflow_version); + Ok( + check_constraint(&numaflow_version_semver, &numaflow_constraint).map_err(|e| { + Error::ServerInfoError(format!( + "numaflow version {} must be upgraded to at least {}, in order to work with current SDK version {}", + numaflow_version_semver, min_numaflow_version, e + )) + })? + ) +} + +/// Checks if the current SDK version is compatible with the given language's minimum supported SDK version. 
+fn check_sdk_compatibility( + sdk_version: &str, + sdk_language: &str, + min_supported_sdk_versions: &SdkConstraints, +) -> error::Result<()> { + // Check if the SDK language is present in the minimum supported SDK versions + if let Some(sdk_required_version) = min_supported_sdk_versions.get(sdk_language) { + let sdk_constraint = format!(">={}", sdk_required_version); + + // For Python, use Pep440 versioning + if sdk_language.to_lowercase() == "python" { + let sdk_version_pep440 = PepVersion::from_str(sdk_version) + .map_err(|e| Error::ServerInfoError(format!("Error parsing SDK version: {}", e)))?; + + let specifiers = VersionSpecifier::from_str(&sdk_constraint).map_err(|e| { + Error::ServerInfoError(format!("Error parsing SDK constraint: {}", e)) + })?; + + if !specifiers.contains(&sdk_version_pep440) { + return Err(Error::ServerInfoError(format!( + "SDK version {} must be upgraded to at least {}, in order to work with the current numaflow version", + sdk_version_pep440, sdk_required_version + ))); + } + } else { + // Strip the 'v' prefix if present for non-Python languages + let sdk_version_stripped = sdk_version.trim_start_matches('v'); + + // Parse the SDK version using semver + let sdk_version_semver = Version::parse(sdk_version_stripped) + .map_err(|e| Error::ServerInfoError(format!("Error parsing SDK version: {}", e)))?; + + // Check if the SDK version satisfies the constraint + check_constraint(&sdk_version_semver, &sdk_constraint).map_err(|_| { + Error::ServerInfoError(format!( + "SDK version {} must be upgraded to at least {}, in order to work with the current numaflow version", + sdk_version_semver, sdk_required_version + )) + })?; + } + } else { + // Language not found in the supported SDK versions + warn!( + "SDK version constraint not found for language: {}", + sdk_language + ); + + // Return error indicating the language + return Err(Error::ServerInfoError(format!( + "SDK version constraint not found for language: {}", + sdk_language + ))); + } + 
Ok(()) +} + +/// Reads the server info file and returns the parsed ServerInfo struct. +async fn read_server_info(file_path: &str) -> error::Result { + // Infinite loop to keep checking until the file is ready + loop { + // Check if the file exists and has content + if let Ok(metadata) = fs::metadata(file_path) { + if metadata.len() > 0 { + // Break out of the loop if the file is ready (has content) + break; + } + } + // Log message indicating the file is not ready and sleep for 1 second before checking again + info!("Server info file {} is not ready, waiting...", file_path); + sleep(Duration::from_secs(1)).await; + } + + // Retry logic for reading the file + let mut retry = 0; + let contents; + loop { + // Attempt to read the file + match fs::read_to_string(file_path) { + Ok(data) => { + if data.ends_with(END) { + // If the file ends with the END marker, trim it and break out of the loop + contents = data.trim_end_matches(END).to_string(); + break; + } else { + warn!("Server info file is incomplete, EOF is missing..."); + } + } + Err(e) => { + warn!("Failed to read file: {}", e); + } + } + + // Retry limit logic + retry += 1; + if retry >= 10 { + // Return an error if the retry limit is reached + return Err(Error::ServerInfoError( + "server-info reading retry exceeded".to_string(), + )); + } + + sleep(Duration::from_millis(100)).await; // Sleep before retrying + } + + // Parse the JSON; if there is an error, return the error + let server_info: ServerInfo = serde_json::from_str(&contents).map_err(|e| { + Error::ServerInfoError(format!( + "Failed to parse server-info file: {}, contents: {}", + e, contents + )) + })?; + + Ok(server_info) // Return the parsed server info +} + + +/// create a mod for version.rs +mod version { + use std::collections::HashMap; + use std::env; + + use once_cell::sync::Lazy; + + pub(crate) type SdkConstraints = HashMap; + + // MINIMUM_SUPPORTED_SDK_VERSIONS is a HashMap with SDK language as key and minimum supported version as value + 
static MINIMUM_SUPPORTED_SDK_VERSIONS: Lazy = Lazy::new(|| { + // TODO: populate this from a static file and make it part of the release process + let mut m = HashMap::new(); + m.insert("go".to_string(), "0.7.0-rc2".to_string()); + m.insert("python".to_string(), "0.7.0a1".to_string()); + m.insert("java".to_string(), "0.7.2-0".to_string()); + m.insert("rust".to_string(), "0.0.1".to_string()); + m + }); + + // Function to get the minimum supported SDK version hash map + pub(crate) fn get_minimum_supported_sdk_versions() -> &'static SdkConstraints { + &MINIMUM_SUPPORTED_SDK_VERSIONS + } + + /// Struct to hold version information. + #[derive(Debug, PartialEq)] + pub struct VersionInfo { + pub version: String, + pub build_date: String, + pub git_commit: String, + pub git_tag: String, + pub git_tree_state: String, + pub go_version: String, + pub compiler: String, + pub platform: String, + } + + impl VersionInfo { + /// Initialize with environment variables or default values. + fn init() -> Self { + let version = env::var("VERSION").unwrap_or_else(|_| "latest".to_string()); + let build_date = + env::var("BUILD_DATE").unwrap_or_else(|_| "1970-01-01T00:00:00Z".to_string()); + let git_commit = env::var("GIT_COMMIT").unwrap_or_default(); + let git_tag = env::var("GIT_TAG").unwrap_or_default(); + let git_tree_state = env::var("GIT_TREE_STATE").unwrap_or_default(); + let go_version = env::var("GO_VERSION").unwrap_or_else(|_| "unknown".to_string()); + let compiler = env::var("COMPILER").unwrap_or_default(); + let platform = env::var("PLATFORM") + .unwrap_or_else(|_| format!("{}/{}", env::consts::OS, env::consts::ARCH)); + + let version_str = + if !git_commit.is_empty() && !git_tag.is_empty() && git_tree_state == "clean" { + git_tag.clone() + } else { + let mut version_str = version.clone(); + if !git_commit.is_empty() && git_commit.len() >= 7 { + version_str.push_str(&format!("+{}", &git_commit[..7])); + if git_tree_state != "clean" { + version_str.push_str(".dirty"); + } + } 
else { + version_str.push_str("+unknown"); + } + version_str + }; + + VersionInfo { + version: version_str, + build_date, + git_commit, + git_tag, + git_tree_state, + go_version, + compiler, + platform, + } + } + } + + /// Use once_cell::sync::Lazy for thread-safe, one-time initialization + static VERSION_INFO: Lazy = Lazy::new(VersionInfo::init); + + /// Getter function for VersionInfo + pub fn get_version_info() -> &'static VersionInfo { + &VERSION_INFO + } +} + +#[cfg(test)] +mod tests { + use serde_json::json; + use std::io::{Read, Write}; + use std::{collections::HashMap, fs::File}; + use tempfile::tempdir; + + use super::*; + + // Constants for the tests + const MINIMUM_NUMAFLOW_VERSION: &str = "1.2.0-rc4"; + const TCP: &str = "tcp"; + const PYTHON: &str = "python"; + const GOLANG: &str = "go"; + + async fn write_server_info( + svr_info: &ServerInfo, + svr_info_file_path: &str, + ) -> error::Result<()> { + let serialized = serde_json::to_string(svr_info).unwrap(); + + // Remove the existing file if it exists + if let Err(e) = fs::remove_file(svr_info_file_path) { + if e.kind() != std::io::ErrorKind::NotFound { + return Err(Error::ServerInfoError(format!( + "Failed to remove server-info file: {}", + e + ))); + } + } + + // Create a new file + let mut file = File::create(svr_info_file_path); + + // Extract the file from the Result + let mut file = match file { + Ok(f) => f, + Err(e) => { + return Err(Error::ServerInfoError(format!( + "Failed to create server-info file: {}", + e + ))); + } + }; + + // Write the serialized data and the END marker to the file + // Remove the existing file if it exists + if let Err(e) = file.write_all(serialized.as_bytes()) { + return Err(Error::ServerInfoError(format!( + "Failed to write server-info file: {}", + e + ))); + } + if let Err(e) = file.write_all(END.as_bytes()) { + return Err(Error::ServerInfoError(format!( + "Failed to write server-info file: {}", + e + ))); + } + Ok(()) + } + + // Helper function to create a 
SdkConstraints struct + fn create_sdk_constraints() -> version::SdkConstraints { + let mut constraints = HashMap::new(); + constraints.insert("python".to_string(), "1.2.0".to_string()); + constraints.insert("java".to_string(), "2.0.0".to_string()); + constraints.insert("go".to_string(), "0.10.0".to_string()); + constraints + } + + #[tokio::test] + async fn test_sdk_compatibility_python_valid() { + let sdk_version = "v1.3.0"; + let sdk_language = "python"; + + let min_supported_sdk_versions = create_sdk_constraints(); + let result = + check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + + assert!(result.is_ok()); + } + + #[tokio::test] + async fn test_sdk_compatibility_python_invalid() { + let sdk_version = "1.1.0"; + let sdk_language = "python"; + + let min_supported_sdk_versions = create_sdk_constraints(); + let result = + check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + + assert!(result.is_err()); + } + + #[tokio::test] + async fn test_sdk_compatibility_java_valid() { + let sdk_version = "v2.1.0"; + let sdk_language = "java"; + + let min_supported_sdk_versions = create_sdk_constraints(); + let result = + check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + + assert!(result.is_ok()); + } + + #[tokio::test] + async fn test_sdk_compatibility_java_invalid() { + let sdk_version = "1.5.0"; + let sdk_language = "java"; + + let min_supported_sdk_versions = create_sdk_constraints(); + let result = + check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + + assert!(result.is_err()); + } + + #[tokio::test] + async fn test_sdk_compatibility_go_valid() { + let sdk_version = "0.11.0"; + let sdk_language = "go"; + + let min_supported_sdk_versions = create_sdk_constraints(); + let result = + check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + + assert!(result.is_ok()); + } + + #[tokio::test] + async fn 
test_sdk_compatibility_go_invalid() { + let sdk_version = "0.9.0"; + let sdk_language = "go"; + + let min_supported_sdk_versions = create_sdk_constraints(); + let result = + check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + + assert!(result.is_err()); + } + + #[tokio::test] + async fn test_numaflow_compatibility_valid() { + let numaflow_version = "1.4.0"; + let min_numaflow_version = "1.3.0"; + + let result = check_numaflow_compatibility(numaflow_version, min_numaflow_version); + + assert!(result.is_ok()); + } + + #[tokio::test] + async fn test_numaflow_compatibility_invalid() { + let numaflow_version = "1.2.0"; + let min_numaflow_version = "1.3.0"; + + let result = check_numaflow_compatibility(numaflow_version, min_numaflow_version); + + assert!(result.is_err()); + } + + #[tokio::test] + async fn test_write_server_info_success() { + // Create a temporary directory + let dir = tempdir().unwrap(); + let file_path = dir.path().join("server_info.txt"); + + // Server info to write + let server_info = ServerInfo { + protocol: TCP.to_string(), + language: GOLANG.to_string(), + minimum_numaflow_version: MINIMUM_NUMAFLOW_VERSION.to_string(), + version: "1.0.0".to_string(), + metadata: { + let mut m = HashMap::new(); + m.insert("key1".to_string(), "value1".to_string()); + Some(m) + }, + }; + + // Write server info + let result = write_server_info(&server_info, file_path.to_str().unwrap()).await; + assert!(result.is_ok(), "Expected Ok, got {:?}", result); + + // Read the file and check its contents + let mut content = String::new(); + File::open(&file_path) + .unwrap() + .read_to_string(&mut content) + .unwrap(); + let expected_json = serde_json::to_string(&server_info).unwrap(); + let expected_content = format!("{}{}", expected_json, END); + assert_eq!(content, expected_content, "File content mismatch"); + } + + #[tokio::test] + async fn test_write_server_info_failure() { + // Invalid file path that cannot be created + let file_path = 
std::path::PathBuf::from("/invalid/path/server_info.txt"); + + // Server info to write + let server_info = ServerInfo { + protocol: TCP.parse().unwrap(), + language: GOLANG.parse().unwrap(), + minimum_numaflow_version: MINIMUM_NUMAFLOW_VERSION.to_string(), + version: "1.0.0".to_string(), + metadata: { + let mut m = HashMap::new(); + m.insert("key1".to_string(), "value1".to_string()); + Some(m) + }, + }; + + // Write server info + let result = write_server_info(&server_info, file_path.to_str().unwrap()).await; + assert!(result.is_err(), "Expected Err, got {:?}", result); + + // Check that we received the correct error variant + let error = result.unwrap_err(); + assert!( + matches!(error, Error::ServerInfoError(_)), + "Expected ServerInfoError, got {:?}", + error + ); + } + + #[tokio::test] + async fn test_read_server_info_success() { + // Create a temporary directory + let dir = tempfile::tempdir().unwrap(); + let file_path = dir.path().join("server_info.txt"); + + // Server info to write + let server_info = ServerInfo { + protocol: TCP.parse().unwrap(), + language: PYTHON.parse().unwrap(), + minimum_numaflow_version: MINIMUM_NUMAFLOW_VERSION.to_string(), + version: "1.0.0".to_string(), + metadata: { + let mut m = HashMap::new(); + m.insert("key1".to_string(), "value1".to_string()); + Some(m) + }, + }; + + // Write server info + let _ = write_server_info(&server_info, file_path.to_str().unwrap()).await; + + // Call the read_server_info function + let result = read_server_info(file_path.to_str().unwrap()).await; + assert!(result.is_ok(), "Expected Ok, got {:?}", result); + + let server_info = result.unwrap(); + assert_eq!(server_info.protocol, "tcp"); + assert_eq!(server_info.language, "python"); + assert_eq!(server_info.minimum_numaflow_version, "1.2.0-rc4"); + assert_eq!(server_info.version, "1.0.0"); + // Check metadata + assert!(server_info.metadata.is_some()); + let server_info = server_info.metadata.unwrap(); + assert_eq!(server_info.len(), 1); + 
assert_eq!(server_info.get("key1").unwrap(), "value1"); + } + + #[tokio::test] + async fn test_read_server_info_retry_limit() { + // Create a temporary directory + let dir = tempfile::tempdir().unwrap(); + let file_path = dir.path().join("server_info.txt"); + + // Write a partial test file not ending with END marker + let mut file = File::create(&file_path).unwrap(); + writeln!(file, r#"{{"protocol":"tcp","language":"go","minimum_numaflow_version":"1.2.0-rc4","version":"1.0.0","metadata":{{"key1":"value1"}}}}"#).unwrap(); + + // Call the read_server_info function + let result = read_server_info(file_path.to_str().unwrap()).await; + assert!(result.is_err(), "Expected Err, got {:?}", result); + + let error = result.unwrap_err(); + assert!( + matches!(error, Error::ServerInfoError(_)), + "Expected ServerInfoError, got {:?}", + error + ); + } + + #[test] + fn test_deserialize_with_null_metadata() { + let json_data = json!({ + "protocol": "uds", + "language": "go", + "minimum_numaflow_version": "1.2.0-rc4", + "version": "v0.7.0-rc2", + "metadata": null + }) + .to_string(); + + let _expected_server_info = ServerInfo { + protocol: "uds".to_string(), + language: "go".to_string(), + minimum_numaflow_version: "1.2.0-rc4".to_string(), + version: "v0.7.0-rc2".to_string(), + metadata: Some(HashMap::new()), // Expecting an empty HashMap here + }; + + let _parsed_server_info: ServerInfo = + serde_json::from_str(&json_data).expect("Failed to parse JSON"); + } + + #[test] + fn test_sdk_compatibility_go_version_with_v_prefix() { + let sdk_version = "v0.11.0"; + let sdk_language = "go"; + + let mut min_supported_sdk_versions = HashMap::new(); + min_supported_sdk_versions.insert("go".to_string(), "0.10.0".to_string()); + + let result = + check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + + assert!(result.is_ok()); + } + + #[test] + fn test_sdk_compatibility_go_version_without_v_prefix() { + let sdk_version = "0.11.0"; + let sdk_language = "go"; + + let 
mut min_supported_sdk_versions = HashMap::new(); + min_supported_sdk_versions.insert("go".to_string(), "0.10.0".to_string()); + + let result = + check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + + assert!(result.is_ok()); + } + + #[test] + fn test_sdk_compatibility_go_version_with_v_prefix_invalid() { + let sdk_version = "v0.9.0"; + let sdk_language = "go"; + + let mut min_supported_sdk_versions = HashMap::new(); + min_supported_sdk_versions.insert("go".to_string(), "0.10.0".to_string()); + + let result = + check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + + assert!(result.is_err()); + } + + #[test] + fn test_sdk_compatibility_go_version_without_v_prefix_invalid() { + let sdk_version = "0.9.0"; + let sdk_language = "go"; + + let mut min_supported_sdk_versions = HashMap::new(); + min_supported_sdk_versions.insert("go".to_string(), "0.10.0".to_string()); + + let result = + check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + + assert!(result.is_err()); + } +} From 1b00923b24ddc666a0f2efb72dca162421701a96 Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Sat, 10 Aug 2024 23:14:55 +0530 Subject: [PATCH 14/23] chore: improve shutdown and health checks for MonoVertex (#1919) Signed-off-by: Yashash H L Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- serving/Cargo.lock | 1 + serving/source-sink/Cargo.toml | 1 + serving/source-sink/src/config.rs | 14 ++- serving/source-sink/src/forwarder.rs | 28 ++--- serving/source-sink/src/lib.rs | 128 +++++++------------ serving/source-sink/src/main.rs | 72 +++++++---- serving/source-sink/src/metrics.rs | 167 ++++++++++++------------- serving/source-sink/src/server_info.rs | 54 +++++--- serving/source-sink/src/sink.rs | 14 +-- serving/source-sink/src/source.rs | 10 +- serving/source-sink/src/transformer.rs | 10 +- 11 files changed, 248 insertions(+), 251 deletions(-) diff --git a/serving/Cargo.lock b/serving/Cargo.lock index 
4bb0fa7c0f..30fd1a14db 100644 --- a/serving/Cargo.lock +++ b/serving/Cargo.lock @@ -2777,6 +2777,7 @@ dependencies = [ "tower", "tracing", "tracing-subscriber", + "trait-variant", "uuid", ] diff --git a/serving/source-sink/Cargo.toml b/serving/source-sink/Cargo.toml index 26d4027da6..9813d52b1e 100644 --- a/serving/source-sink/Cargo.toml +++ b/serving/source-sink/Cargo.toml @@ -26,6 +26,7 @@ uuid = { version = "1.10.0", features = ["v4"] } once_cell = "1.19.0" serde_json = "1.0.122" numaflow-models = { path = "../numaflow-models"} +trait-variant = "0.1.2" rcgen = "0.13.1" rustls = { version = "0.23.12", features = ["aws_lc_rs"] } serde = { version = "1.0.204", features = ["derive"] } diff --git a/serving/source-sink/src/config.rs b/serving/source-sink/src/config.rs index 8adbc2691d..3939e16f1a 100644 --- a/serving/source-sink/src/config.rs +++ b/serving/source-sink/src/config.rs @@ -1,11 +1,14 @@ -use crate::error::Error; -use base64::prelude::BASE64_STANDARD; -use base64::Engine; -use numaflow_models::models::MonoVertex; use std::env; use std::sync::OnceLock; + +use base64::prelude::BASE64_STANDARD; +use base64::Engine; use tracing::level_filters::LevelFilter; +use numaflow_models::models::MonoVertex; + +use crate::error::Error; + const ENV_MONO_VERTEX_OBJ: &str = "NUMAFLOW_MONO_VERTEX_OBJECT"; const ENV_GRPC_MAX_MESSAGE_SIZE: &str = "NUMAFLOW_GRPC_MAX_MESSAGE_SIZE"; const ENV_POD_REPLICA: &str = "NUMAFLOW_REPLICA"; @@ -125,9 +128,10 @@ impl Settings { #[cfg(test)] mod tests { - use super::*; use std::env; + use super::*; + #[test] fn test_settings_load() { // Set up environment variables diff --git a/serving/source-sink/src/forwarder.rs b/serving/source-sink/src/forwarder.rs index cd39038a7c..002d42ee9c 100644 --- a/serving/source-sink/src/forwarder.rs +++ b/serving/source-sink/src/forwarder.rs @@ -1,3 +1,9 @@ +use chrono::Utc; +use metrics::counter; +use tokio::task::JoinSet; +use tokio_util::sync::CancellationToken; +use tracing::{info, trace}; + use 
crate::config::config; use crate::error::{Error, Result}; use crate::metrics::{ @@ -7,11 +13,6 @@ use crate::metrics::{ use crate::sink::SinkClient; use crate::source::SourceClient; use crate::transformer::TransformerClient; -use chrono::Utc; -use metrics::counter; -use tokio::sync::oneshot; -use tokio::task::JoinSet; -use tracing::{info, trace}; const MONO_VERTEX_TYPE: &str = "mono_vertex"; @@ -22,7 +23,7 @@ pub(crate) struct Forwarder { source_client: SourceClient, sink_client: SinkClient, transformer_client: Option, - shutdown_rx: oneshot::Receiver<()>, + cln_token: CancellationToken, common_labels: Vec<(String, String)>, } @@ -32,7 +33,7 @@ impl Forwarder { source_client: SourceClient, sink_client: SinkClient, transformer_client: Option, - shutdown_rx: oneshot::Receiver<()>, + cln_token: CancellationToken, ) -> Result { let common_labels = vec![ ( @@ -48,8 +49,8 @@ impl Forwarder { source_client, sink_client, transformer_client, - shutdown_rx, common_labels, + cln_token, }) } @@ -64,7 +65,7 @@ impl Forwarder { let start_time = tokio::time::Instant::now(); // two arms, either shutdown or forward-a-chunk tokio::select! 
{ - _ = &mut self.shutdown_rx => { + _ = self.cln_token.cancelled() => { info!("Shutdown signal received, stopping forwarder..."); break; } @@ -145,6 +146,7 @@ mod tests { use numaflow::source::{Message, Offset, SourceReadRequest}; use numaflow::{sink, source, sourcetransform}; use tokio::sync::mpsc::Sender; + use tokio_util::sync::CancellationToken; use crate::forwarder::Forwarder; use crate::sink::{SinkClient, SinkConfig}; @@ -347,7 +349,7 @@ mod tests { // Wait for the servers to start tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - let (forwarder_shutdown_tx, forwarder_shutdown_rx) = tokio::sync::oneshot::channel(); + let cln_token = CancellationToken::new(); let source_client = SourceClient::connect(source_config) .await @@ -365,7 +367,7 @@ mod tests { source_client, sink_client, Some(transformer_client), - forwarder_shutdown_rx, + cln_token.clone(), ) .await .expect("failed to create forwarder"); @@ -383,9 +385,7 @@ mod tests { ); // stop the forwarder - forwarder_shutdown_tx - .send(()) - .expect("failed to send shutdown signal"); + cln_token.cancel(); forwarder_handle .await .expect("failed to join forwarder task"); diff --git a/serving/source-sink/src/lib.rs b/serving/source-sink/src/lib.rs index 35a4d15a1a..15b59e537f 100644 --- a/serving/source-sink/src/lib.rs +++ b/serving/source-sink/src/lib.rs @@ -1,16 +1,16 @@ +use std::net::SocketAddr; use std::time::Duration; -use tokio::signal; -use tokio::sync::oneshot; -use tokio::task::JoinHandle; -use tokio::time::sleep; -use tracing::{error, info, warn}; - +use crate::config::config; pub(crate) use crate::error::Error; use crate::forwarder::Forwarder; +use crate::metrics::{start_metrics_https_server, MetricsState}; use crate::sink::{SinkClient, SinkConfig}; use crate::source::{SourceClient, SourceConfig}; use crate::transformer::{TransformerClient, TransformerConfig}; +use tokio::time::sleep; +use tokio_util::sync::CancellationToken; +use tracing::{error, info, warn}; pub(crate) use 
self::error::Result; @@ -40,13 +40,13 @@ pub(crate) mod shared; /// forwards a chunk of data from the source to the sink via an optional transformer. /// It takes an optional custom_shutdown_rx for shutting down the forwarder, useful for testing. -pub async fn run_forwarder( +pub async fn init( source_config: SourceConfig, sink_config: SinkConfig, transformer_config: Option, - custom_shutdown_rx: Option>, + cln_token: CancellationToken, ) -> Result<()> { - server_info::check_for_server_compatibility(&source_config.server_info_file) + server_info::check_for_server_compatibility(&source_config.server_info_file, cln_token.clone()) .await .map_err(|e| { warn!("Error waiting for source server info file: {:?}", e); @@ -54,11 +54,7 @@ pub async fn run_forwarder( })?; let mut source_client = SourceClient::connect(source_config).await?; - // start the lag reader to publish lag metrics - let mut lag_reader = metrics::LagReader::new(source_client.clone(), None, None); - lag_reader.start().await; - - server_info::check_for_server_compatibility(&sink_config.server_info_file) + server_info::check_for_server_compatibility(&sink_config.server_info_file, cln_token.clone()) .await .map_err(|e| { warn!("Error waiting for sink server info file: {:?}", e); @@ -68,7 +64,7 @@ pub async fn run_forwarder( let mut sink_client = SinkClient::connect(sink_config).await?; let mut transformer_client = if let Some(config) = transformer_config { - server_info::check_for_server_compatibility(&config.server_info_file) + server_info::check_for_server_compatibility(&config.server_info_file, cln_token.clone()) .await .map_err(|e| { warn!("Error waiting for transformer server info file: {:?}", e); @@ -79,8 +75,6 @@ pub async fn run_forwarder( None }; - let (shutdown_tx, shutdown_rx) = oneshot::channel(); - // readiness check for all the ud containers wait_until_ready( &mut source_client, @@ -89,38 +83,35 @@ pub async fn run_forwarder( ) .await?; - // TODO: use builder pattern of options like TIMEOUT, 
BATCH_SIZE, etc? - let mut forwarder = - Forwarder::new(source_client, sink_client, transformer_client, shutdown_rx).await?; - - let forwarder_handle: JoinHandle> = tokio::spawn(async move { - forwarder.run().await?; - Ok(()) + // Start the metrics server, which server the prometheus metrics. + let metrics_addr: SocketAddr = format!("0.0.0.0:{}", &config().metrics_server_listen_port) + .parse() + .expect("Invalid address"); + + // Start the metrics server in a separate background async spawn, + // This should be running throughout the lifetime of the application, hence the handle is not + // joined. + let metrics_state = MetricsState { + source_client: source_client.clone(), + sink_client: sink_client.clone(), + transformer_client: transformer_client.clone(), + }; + tokio::spawn(async move { + if let Err(e) = start_metrics_https_server(metrics_addr, metrics_state).await { + error!("Metrics server error: {:?}", e); + } }); - let shutdown_handle: JoinHandle> = tokio::spawn(async move { - shutdown_signal(custom_shutdown_rx).await; - shutdown_tx - .send(()) - .map_err(|_| Error::ForwarderError("Failed to send shutdown signal".to_string()))?; - Ok(()) - }); + // start the lag reader to publish lag metrics + let mut lag_reader = metrics::LagReader::new(source_client.clone(), None, None); + lag_reader.start().await; - forwarder_handle - .await - .unwrap_or_else(|e| { - error!("Forwarder task panicked: {:?}", e); - Err(Error::ForwarderError("Forwarder task panicked".to_string())) - }) - .unwrap_or_else(|e| { - error!("Forwarder failed: {:?}", e); - }); + // TODO: use builder pattern of options like TIMEOUT, BATCH_SIZE, etc? 
+ let mut forwarder = + Forwarder::new(source_client, sink_client, transformer_client, cln_token).await?; - if !shutdown_handle.is_finished() { - shutdown_handle.abort(); - } + forwarder.run().await?; - lag_reader.shutdown().await; info!("Forwarder stopped gracefully"); Ok(()) } @@ -131,18 +122,18 @@ async fn wait_until_ready( transformer_client: &mut Option, ) -> Result<()> { loop { - let source_ready = source_client.is_ready().await.is_ok(); + let source_ready = source_client.is_ready().await; if !source_ready { info!("UDSource is not ready, waiting..."); } - let sink_ready = sink_client.is_ready().await.is_ok(); + let sink_ready = sink_client.is_ready().await; if !sink_ready { info!("UDSink is not ready, waiting..."); } let transformer_ready = if let Some(client) = transformer_client { - let ready = client.is_ready().await.is_ok(); + let ready = client.is_ready().await; if !ready { info!("UDTransformer is not ready, waiting..."); } @@ -161,40 +152,6 @@ async fn wait_until_ready( Ok(()) } -async fn shutdown_signal(shutdown_rx: Option>) { - let ctrl_c = async { - signal::ctrl_c() - .await - .expect("failed to install Ctrl+C handler"); - info!("Received Ctrl+C signal"); - }; - - let terminate = async { - signal::unix::signal(signal::unix::SignalKind::terminate()) - .expect("failed to install signal handler") - .recv() - .await; - info!("Received terminate signal"); - }; - - let custom_shutdown = async { - if let Some(rx) = shutdown_rx { - rx.await.ok(); - } else { - // Create a watch channel that never sends - let (_tx, mut rx) = tokio::sync::watch::channel(()); - rx.changed().await.ok(); - } - info!("Received custom shutdown signal"); - }; - - tokio::select! 
{ - _ = ctrl_c => {}, - _ = terminate => {}, - _ = custom_shutdown => {}, - } -} - #[cfg(test)] mod tests { use std::env; @@ -202,6 +159,7 @@ mod tests { use numaflow::source::{Message, Offset, SourceReadRequest}; use numaflow::{sink, source}; use tokio::sync::mpsc::Sender; + use tokio_util::sync::CancellationToken; use crate::sink::SinkConfig; use crate::source::SourceConfig; @@ -284,11 +242,11 @@ mod tests { env::set_var("SOURCE_SOCKET", src_sock_file.to_str().unwrap()); env::set_var("SINK_SOCKET", sink_sock_file.to_str().unwrap()); - let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel(); + let cln_token = CancellationToken::new(); + let forwarder_cln_token = cln_token.clone(); let forwarder_handle = tokio::spawn(async move { - let result = - super::run_forwarder(source_config, sink_config, None, Some(shutdown_rx)).await; + let result = super::init(source_config, sink_config, None, forwarder_cln_token).await; assert!(result.is_ok()); }); @@ -297,7 +255,7 @@ mod tests { tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; // stop the forwarder - shutdown_tx.send(()).unwrap(); + cln_token.cancel(); forwarder_handle.await.unwrap(); // stop the source and sink servers diff --git a/serving/source-sink/src/main.rs b/serving/source-sink/src/main.rs index 0013e33613..e3cfb7e6a6 100644 --- a/serving/source-sink/src/main.rs +++ b/serving/source-sink/src/main.rs @@ -1,41 +1,28 @@ +use tokio::signal; +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; +use tracing::level_filters::LevelFilter; +use tracing::{error, info}; +use tracing_subscriber::EnvFilter; + use sourcer_sinker::config::config; -use sourcer_sinker::metrics::start_metrics_https_server; -use sourcer_sinker::run_forwarder; +use sourcer_sinker::init; use sourcer_sinker::sink::SinkConfig; use sourcer_sinker::source::SourceConfig; use sourcer_sinker::transformer::TransformerConfig; -use std::env; -use std::net::SocketAddr; -use tracing::level_filters::LevelFilter; -use 
tracing::{error, info}; -use tracing_subscriber::EnvFilter; #[tokio::main] async fn main() { - let log_level = env::var("NUMAFLOW_DEBUG").unwrap_or_else(|_| LevelFilter::INFO.to_string()); // Initialize the logger tracing_subscriber::fmt() .with_env_filter( EnvFilter::builder() .with_default_directive(LevelFilter::INFO.into()) - .parse_lossy(log_level), + .parse_lossy(&config().log_level), ) .with_target(false) .init(); - // Start the metrics server, which server the prometheus metrics. - // TODO: make the port configurable. - let metrics_addr: SocketAddr = "0.0.0.0:2469".parse().expect("Invalid address"); - - // Start the metrics server in a separate background async spawn, - // This should be running throughout the lifetime of the application, hence the handle is not - // joined. - tokio::spawn(async move { - if let Err(e) = start_metrics_https_server(metrics_addr).await { - error!("Metrics server error: {:?}", e); - } - }); - // Initialize the source, sink and transformer configurations // We are using the default configurations for now. let source_config = SourceConfig { @@ -47,6 +34,7 @@ async fn main() { max_message_size: config().grpc_max_message_size, ..Default::default() }; + let transformer_config = if config().is_transformer_enabled { Some(TransformerConfig { max_message_size: config().grpc_max_message_size, @@ -56,10 +44,46 @@ async fn main() { None }; - // Run the forwarder - if let Err(e) = run_forwarder(source_config, sink_config, transformer_config, None).await { + let cln_token = CancellationToken::new(); + let shutdown_cln_token = cln_token.clone(); + // wait for SIG{INT,TERM} and invoke cancellation token. + let shutdown_handle: JoinHandle> = tokio::spawn(async move { + shutdown_signal().await; + shutdown_cln_token.cancel(); + Ok(()) + }); + + // Run the forwarder with cancellation token. 
+ if let Err(e) = init(source_config, sink_config, transformer_config, cln_token).await { error!("Application error: {:?}", e); + + // abort the task since we have an error + if !shutdown_handle.is_finished() { + shutdown_handle.abort(); + } } info!("Gracefully Exiting..."); } + +async fn shutdown_signal() { + let ctrl_c = async { + signal::ctrl_c() + .await + .expect("failed to install Ctrl+C handler"); + info!("Received Ctrl+C signal"); + }; + + let terminate = async { + signal::unix::signal(signal::unix::SignalKind::terminate()) + .expect("failed to install signal handler") + .recv() + .await; + info!("Received terminate signal"); + }; + + tokio::select! { + _ = ctrl_c => {}, + _ = terminate => {}, + } +} diff --git a/serving/source-sink/src/metrics.rs b/serving/source-sink/src/metrics.rs index b33b5b2fb2..896c7a768d 100644 --- a/serving/source-sink/src/metrics.rs +++ b/serving/source-sink/src/metrics.rs @@ -3,6 +3,7 @@ use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; +use axum::extract::State; use axum::http::StatusCode; use axum::response::IntoResponse; use axum::{routing::get, Router}; @@ -14,11 +15,12 @@ use tokio::net::{TcpListener, ToSocketAddrs}; use tokio::sync::Mutex; use tokio::task::JoinHandle; use tokio::time; -use tokio_util::sync::CancellationToken; use tracing::{debug, error, info}; use crate::error::Error; +use crate::sink::SinkClient; use crate::source::SourceClient; +use crate::transformer::TransformerClient; // Define the labels for the metrics pub const MONO_VERTEX_NAME: &str = "vertex"; @@ -32,16 +34,37 @@ pub const FORWARDER_READ_BYTES_TOTAL: &str = "forwarder_read_bytes_total"; pub const FORWARDER_ACK_TOTAL: &str = "forwarder_ack_total"; pub const FORWARDER_WRITE_TOTAL: &str = "forwarder_write_total"; +#[derive(Clone)] +pub(crate) struct MetricsState { + pub source_client: SourceClient, + pub sink_client: SinkClient, + pub transformer_client: Option, +} + /// Collect and emit prometheus metrics. 
-/// Metrics router and server -pub async fn start_metrics_http_server(addr: A) -> crate::Result<()> +/// Metrics router and server over HTTP endpoint. +// This is not used currently +#[allow(dead_code)] +pub(crate) async fn start_metrics_http_server( + addr: A, + source_client: SourceClient, + sink_client: SinkClient, + transformer_client: Option, +) -> crate::Result<()> where A: ToSocketAddrs + std::fmt::Debug, { // setup_metrics_recorder should only be invoked once let recorder_handle = setup_metrics_recorder()?; - let metrics_app = metrics_router(recorder_handle); + let metrics_app = metrics_router( + recorder_handle, + MetricsState { + source_client, + sink_client, + transformer_client, + }, + ); let listener = TcpListener::bind(&addr) .await @@ -55,9 +78,10 @@ where Ok(()) } -pub async fn start_metrics_https_server(addr: SocketAddr) -> crate::Result<()> -where -{ +pub(crate) async fn start_metrics_https_server( + addr: SocketAddr, + metrics_state: MetricsState, +) -> crate::Result<()> { let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); // Generate a self-signed certificate @@ -71,7 +95,7 @@ where // setup_metrics_recorder should only be invoked once let recorder_handle = setup_metrics_recorder()?; - let metrics_app = metrics_router(recorder_handle); + let metrics_app = metrics_router(recorder_handle, metrics_state); axum_server::bind_rustls(addr, tls_config) .serve(metrics_app.into_make_service()) @@ -82,12 +106,14 @@ where } /// router for metrics and k8s health endpoints -fn metrics_router(recorder_handle: PrometheusHandle) -> Router { +fn metrics_router(recorder_handle: PrometheusHandle, metrics_state: MetricsState) -> Router { let metrics_app = Router::new() .route("/metrics", get(move || ready(recorder_handle.render()))) .route("/livez", get(livez)) .route("/readyz", get(readyz)) - .route("/sidecar-livez", get(sidecar_livez)); + .route("/sidecar-livez", get(sidecar_livez)) + .with_state(metrics_state); + metrics_app } @@ -99,7 
+125,18 @@ async fn readyz() -> impl IntoResponse { StatusCode::NO_CONTENT } -async fn sidecar_livez() -> impl IntoResponse { +async fn sidecar_livez(State(mut state): State) -> impl IntoResponse { + if !state.source_client.is_ready().await { + return StatusCode::SERVICE_UNAVAILABLE; + } + if !state.sink_client.is_ready().await { + return StatusCode::SERVICE_UNAVAILABLE; + } + if let Some(mut transformer_client) = state.transformer_client { + if !transformer_client.is_ready().await { + return StatusCode::SERVICE_UNAVAILABLE; + } + } StatusCode::NO_CONTENT } @@ -157,7 +194,6 @@ pub(crate) struct LagReader { source_client: SourceClient, lag_checking_interval: Duration, refresh_interval: Duration, - cancellation_token: CancellationToken, buildup_handle: Option>, expose_handle: Option>, pending_stats: Arc>>, @@ -174,7 +210,6 @@ impl LagReader { source_client, lag_checking_interval: lag_checking_interval.unwrap_or_else(|| Duration::from_secs(3)), refresh_interval: refresh_interval.unwrap_or_else(|| Duration::from_secs(5)), - cancellation_token: CancellationToken::new(), buildup_handle: None, expose_handle: None, pending_stats: Arc::new(Mutex::new(Vec::with_capacity(MAX_PENDING_STATS))), @@ -187,94 +222,81 @@ impl LagReader { /// - One to periodically check the lag and update the pending stats. /// - Another to periodically expose the pending metrics. 
pub async fn start(&mut self) { - let token = self.cancellation_token.clone(); let source_client = self.source_client.clone(); let lag_checking_interval = self.lag_checking_interval; let refresh_interval = self.refresh_interval; let pending_stats = self.pending_stats.clone(); self.buildup_handle = Some(tokio::spawn(async move { - build_pending_info(source_client, token, lag_checking_interval, pending_stats).await; + build_pending_info(source_client, lag_checking_interval, pending_stats).await; })); - let token = self.cancellation_token.clone(); let pending_stats = self.pending_stats.clone(); self.expose_handle = Some(tokio::spawn(async move { - expose_pending_metrics(token, refresh_interval, pending_stats).await; + expose_pending_metrics(refresh_interval, pending_stats).await; })); } +} - /// Shuts down the lag reader by cancelling the tasks and waiting for them to complete. - pub(crate) async fn shutdown(self) { - self.cancellation_token.cancel(); - if let Some(handle) = self.buildup_handle { - let _ = handle.await; +/// When lag-reader is dropped, we need to clean up the pending exposer and the pending builder tasks. +impl Drop for LagReader { + fn drop(&mut self) { + if let Some(handle) = self.expose_handle.take() { + handle.abort(); } - if let Some(handle) = self.expose_handle { - let _ = handle.await; + if let Some(handle) = self.buildup_handle.take() { + handle.abort(); } + + info!("Stopped the Lag-Reader Expose and Builder tasks"); } } /// Periodically checks the pending messages from the source client and build the pending stats. async fn build_pending_info( mut source_client: SourceClient, - cancellation_token: CancellationToken, lag_checking_interval: Duration, pending_stats: Arc>>, ) { let mut ticker = time::interval(lag_checking_interval); loop { - tokio::select! 
{ - _ = cancellation_token.cancelled() => { - return; - } - _ = ticker.tick() => { - match source_client.pending_fn().await { - Ok(pending) => { - if pending != -1 { - let mut stats = pending_stats.lock().await; - stats.push(TimestampedPending { - pending, - timestamp: std::time::Instant::now(), - }); - let n = stats.len(); - // Ensure only the most recent MAX_PENDING_STATS entries are kept - if n >= MAX_PENDING_STATS { - stats.drain(0..(n - MAX_PENDING_STATS)); - } - } - } - Err(err) => { - error!("Failed to get pending messages: {:?}", err); + ticker.tick().await; + match source_client.pending_fn().await { + Ok(pending) => { + if pending != -1 { + let mut stats = pending_stats.lock().await; + stats.push(TimestampedPending { + pending, + timestamp: std::time::Instant::now(), + }); + let n = stats.len(); + // Ensure only the most recent MAX_PENDING_STATS entries are kept + if n >= MAX_PENDING_STATS { + stats.drain(0..(n - MAX_PENDING_STATS)); } } } + Err(err) => { + error!("Failed to get pending messages: {:?}", err); + } } } } // Periodically exposes the pending metrics by calculating the average pending messages over different intervals. async fn expose_pending_metrics( - cancellation_token: CancellationToken, refresh_interval: Duration, pending_stats: Arc>>, ) { let mut ticker = time::interval(refresh_interval); let lookback_seconds_map = vec![("1m", 60), ("5m", 300), ("15m", 900)]; loop { - tokio::select! 
{ - _ = cancellation_token.cancelled() => { - return; - } - _ = ticker.tick() => { - for (label, seconds) in &lookback_seconds_map { - let pending = calculate_pending(*seconds, &pending_stats).await; - if pending != -1 { - // TODO: emit it as a metric - info!("Pending messages ({}): {}", label, pending); - } - } + ticker.tick().await; + for (label, seconds) in &lookback_seconds_map { + let pending = calculate_pending(*seconds, &pending_stats).await; + if pending != -1 { + // TODO: emit it as a metric + info!("Pending messages ({}): {}", label, pending); } } } @@ -307,27 +329,4 @@ async fn calculate_pending( result } -#[cfg(test)] -mod tests { - use std::net::SocketAddr; - use std::time::Duration; - - use tokio::time::sleep; - - use super::*; - - #[tokio::test] - async fn test_start_metrics_server() { - let addr = SocketAddr::from(([127, 0, 0, 1], 0)); - let server = tokio::spawn(async move { - let result = start_metrics_http_server(addr).await; - assert!(result.is_ok()) - }); - - // Give the server a little bit of time to start - sleep(Duration::from_millis(100)).await; - - // Stop the server - server.abort(); - } -} +// TODO add tests diff --git a/serving/source-sink/src/server_info.rs b/serving/source-sink/src/server_info.rs index fe841fe99e..7412b2ca9d 100644 --- a/serving/source-sink/src/server_info.rs +++ b/serving/source-sink/src/server_info.rs @@ -7,10 +7,10 @@ use pep440_rs::{Version as PepVersion, VersionSpecifier}; use semver::{Version, VersionReq}; use serde::{Deserialize, Serialize}; use tokio::time::sleep; +use tokio_util::sync::CancellationToken; use tracing::{info, warn}; -use crate::error; -use crate::error::Error; +use crate::error::{self, Error}; use crate::server_info::version::SdkConstraints; // Constant to represent the end of the server info. @@ -34,9 +34,12 @@ pub(crate) struct ServerInfo { /// check_for_server_compatibility waits until the server info file is ready and check whether the /// server is compatible with Numaflow. 
-pub async fn check_for_server_compatibility(file_path: &str) -> error::Result<()> { +pub async fn check_for_server_compatibility( + file_path: &str, + cln_token: CancellationToken, +) -> error::Result<()> { // Read the server info file - let server_info = read_server_info(file_path).await?; + let server_info = read_server_info(file_path, cln_token).await?; // Log the server info info!("Server info file: {:?}", server_info); @@ -79,8 +82,7 @@ fn check_constraint(version: &Version, constraint: &str) -> error::Result<()> { // Parse the given constraint as a semantic version requirement let version_req = VersionReq::parse(constraint).map_err(|e| { Error::ServerInfoError(format!( - "Error parsing constraint: {},\ - constraint string: {}", + "Error parsing constraint: {}, constraint string: {}", e, constraint )) })?; @@ -109,14 +111,12 @@ fn check_numaflow_compatibility( // Create a version constraint based on the minimum numaflow version let numaflow_constraint = format!(">={}", min_numaflow_version); - Ok( - check_constraint(&numaflow_version_semver, &numaflow_constraint).map_err(|e| { - Error::ServerInfoError(format!( - "numaflow version {} must be upgraded to at least {}, in order to work with current SDK version {}", - numaflow_version_semver, min_numaflow_version, e - )) - })? - ) + check_constraint(&numaflow_version_semver, &numaflow_constraint).map_err(|e| { + Error::ServerInfoError(format!( + "numaflow version {} must be upgraded to at least {}, in order to work with current SDK version {}", + numaflow_version_semver, min_numaflow_version, e + )) + }) } /// Checks if the current SDK version is compatible with the given language's minimum supported SDK version. @@ -177,9 +177,18 @@ fn check_sdk_compatibility( } /// Reads the server info file and returns the parsed ServerInfo struct. -async fn read_server_info(file_path: &str) -> error::Result { +/// The cancellation token is used to stop ready-check of server_info file in case it is missing. 
+/// This cancellation token is closed via the global shutdown handler. +async fn read_server_info( + file_path: &str, + cln_token: CancellationToken, +) -> error::Result { // Infinite loop to keep checking until the file is ready loop { + if cln_token.is_cancelled() { + return Err(Error::ServerInfoError("Operation cancelled".to_string())); + } + // Check if the file exists and has content if let Ok(metadata) = fs::metadata(file_path) { if metadata.len() > 0 { @@ -235,7 +244,6 @@ async fn read_server_info(file_path: &str) -> error::Result { Ok(server_info) // Return the parsed server info } - /// create a mod for version.rs mod version { use std::collections::HashMap; @@ -358,7 +366,7 @@ mod tests { } // Create a new file - let mut file = File::create(svr_info_file_path); + let file = File::create(svr_info_file_path); // Extract the file from the Result let mut file = match file { @@ -560,6 +568,9 @@ mod tests { let dir = tempfile::tempdir().unwrap(); let file_path = dir.path().join("server_info.txt"); + let cln_token = CancellationToken::new(); + let _drop_guard = cln_token.clone().drop_guard(); + // Server info to write let server_info = ServerInfo { protocol: TCP.parse().unwrap(), @@ -577,7 +588,7 @@ mod tests { let _ = write_server_info(&server_info, file_path.to_str().unwrap()).await; // Call the read_server_info function - let result = read_server_info(file_path.to_str().unwrap()).await; + let result = read_server_info(file_path.to_str().unwrap(), cln_token).await; assert!(result.is_ok(), "Expected Ok, got {:?}", result); let server_info = result.unwrap(); @@ -602,8 +613,11 @@ mod tests { let mut file = File::create(&file_path).unwrap(); writeln!(file, r#"{{"protocol":"tcp","language":"go","minimum_numaflow_version":"1.2.0-rc4","version":"1.0.0","metadata":{{"key1":"value1"}}}}"#).unwrap(); + let cln_token = CancellationToken::new(); + let _drop_guard = cln_token.clone().drop_guard(); + // Call the read_server_info function - let result = 
read_server_info(file_path.to_str().unwrap()).await; + let result = read_server_info(file_path.to_str().unwrap(), cln_token).await; assert!(result.is_err(), "Expected Err, got {:?}", result); let error = result.unwrap_err(); @@ -623,7 +637,7 @@ mod tests { "version": "v0.7.0-rc2", "metadata": null }) - .to_string(); + .to_string(); let _expected_server_info = ServerInfo { protocol: "uds".to_string(), diff --git a/serving/source-sink/src/sink.rs b/serving/source-sink/src/sink.rs index 6312287338..ab9a0f49c2 100644 --- a/serving/source-sink/src/sink.rs +++ b/serving/source-sink/src/sink.rs @@ -30,6 +30,7 @@ impl Default for SinkConfig { } } +#[derive(Clone)] /// SinkClient is a client to interact with the sink server. pub struct SinkClient { client: proto::sink_client::SinkClient, @@ -67,20 +68,19 @@ impl SinkClient { Ok(response) } - pub(crate) async fn is_ready(&mut self) -> Result { - let request = Request::new(()); - let response = self.client.is_ready(request).await?.into_inner(); - Ok(response) + pub(crate) async fn is_ready(&mut self) -> bool { + self.client.is_ready(Request::new(())).await.is_ok() } } #[cfg(test)] mod tests { - use crate::message::Offset; use chrono::offset::Utc; use numaflow::sink; use tracing::info; + use crate::message::Offset; + use super::*; struct Logger; @@ -159,8 +159,8 @@ mod tests { }, ]; - let ready_response = sink_client.is_ready().await.unwrap(); - assert_eq!(ready_response.ready, true); + let ready_response = sink_client.is_ready().await; + assert!(ready_response); let response = sink_client.sink_fn(messages).await.unwrap(); assert_eq!(response.results.len(), 2); diff --git a/serving/source-sink/src/source.rs b/serving/source-sink/src/source.rs index 3c164bb5e2..3e4ec30d40 100644 --- a/serving/source-sink/src/source.rs +++ b/serving/source-sink/src/source.rs @@ -115,10 +115,8 @@ impl SourceClient { Ok(response.result.map_or(vec![], |r| r.partitions)) } - pub(crate) async fn is_ready(&mut self) -> Result { - let request = 
Request::new(()); - let response = self.client.is_ready(request).await?.into_inner(); - Ok(response) + pub(crate) async fn is_ready(&mut self) -> bool { + self.client.is_ready(Request::new(())).await.is_ok() } } @@ -222,8 +220,8 @@ mod tests { .await .expect("failed to connect to source server"); - let response = source_client.is_ready().await.unwrap(); - assert!(response.ready); + let response = source_client.is_ready().await; + assert!(response); let messages = source_client.read_fn(5, 1000).await.unwrap(); assert_eq!(messages.len(), 5); diff --git a/serving/source-sink/src/transformer.rs b/serving/source-sink/src/transformer.rs index 2bbca45bce..5a3f70f73f 100644 --- a/serving/source-sink/src/transformer.rs +++ b/serving/source-sink/src/transformer.rs @@ -74,10 +74,8 @@ impl TransformerClient { Ok(messages) } - pub(crate) async fn is_ready(&mut self) -> Result { - let request = Request::new(()); - let response = self.client.is_ready(request).await?.into_inner(); - Ok(response) + pub(crate) async fn is_ready(&mut self) -> bool { + self.client.is_ready(Request::new(())).await.is_ok() } } @@ -144,8 +142,8 @@ mod tests { headers: Default::default(), }; - let resp = client.is_ready().await?; - assert_eq!(resp.ready, true); + let resp = client.is_ready().await; + assert_eq!(resp, true); let resp = client.transform_fn(message).await?; assert_eq!(resp.len(), 1); From 175f95279e5d735defdb5efc09ade82ccff20d0d Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Sun, 11 Aug 2024 20:42:41 -0700 Subject: [PATCH 15/23] chore: rename proto name for sourcetransform (#1921) Signed-off-by: Vigith Maurice --- .../v1/{transform.proto => sourcetransform.proto} | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) rename pkg/apis/proto/sourcetransform/v1/{transform.proto => sourcetransform.proto} (98%) diff --git a/pkg/apis/proto/sourcetransform/v1/transform.proto b/pkg/apis/proto/sourcetransform/v1/sourcetransform.proto similarity index 98% rename from 
pkg/apis/proto/sourcetransform/v1/transform.proto rename to pkg/apis/proto/sourcetransform/v1/sourcetransform.proto index c2d0a7182e..b93d82b9a8 100644 --- a/pkg/apis/proto/sourcetransform/v1/transform.proto +++ b/pkg/apis/proto/sourcetransform/v1/sourcetransform.proto @@ -42,6 +42,7 @@ message SourceTransformRequest { bytes value = 2; google.protobuf.Timestamp event_time = 3; google.protobuf.Timestamp watermark = 4; + map headers = 5; } /** @@ -62,4 +63,4 @@ message SourceTransformResponse { */ message ReadyResponse { bool ready = 1; -} \ No newline at end of file +} From adcad2a4772c6b0dee98f1ca5a4d13443d8fd86e Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Mon, 12 Aug 2024 22:12:23 +0530 Subject: [PATCH 16/23] chore: reorganize rust code (#1922) Signed-off-by: Vigith Maurice Signed-off-by: Yashash H L Co-authored-by: Vigith Maurice --- .github/workflows/ci.yaml | 9 +- Dockerfile | 52 +++---- Makefile | 2 +- .../numaflow/v1alpha1/mono_vertex_types.go | 2 +- pkg/apis/numaflow/v1alpha1/vertex_types.go | 6 +- {serving => rust}/.dockerignore | 0 {serving => rust}/.rustfmt.toml | 0 {serving => rust}/Cargo.lock | 129 +++++++++--------- rust/Cargo.toml | 19 +++ {serving => rust}/Dockerfile | 40 +++--- {serving => rust}/Makefile | 0 {serving => rust}/README.md | 0 {serving => rust}/backoff/Cargo.toml | 0 {serving => rust}/backoff/src/lib.rs | 0 {serving => rust}/backoff/src/retry.rs | 0 {serving => rust}/backoff/src/strategy.rs | 0 .../backoff/src/strategy/fixed.rs | 0 .../monovertex}/Cargo.toml | 3 +- .../source-sink => rust/monovertex}/build.rs | 0 .../monovertex}/proto/sink.proto | 0 .../monovertex}/proto/source.proto | 0 .../monovertex}/proto/sourcetransform.proto | 0 .../monovertex}/src/config.rs | 0 .../monovertex}/src/error.rs | 0 .../monovertex}/src/forwarder.rs | 0 .../monovertex}/src/lib.rs | 88 +++++++++++- .../monovertex}/src/message.rs | 0 .../monovertex}/src/metrics.rs | 0 .../monovertex}/src/server_info.rs | 0 .../monovertex}/src/shared.rs | 0 
.../monovertex}/src/sink.rs | 23 +++- .../monovertex}/src/source.rs | 23 +++- .../monovertex}/src/transformer.rs | 22 ++- {serving => rust}/numaflow-models/Cargo.toml | 0 {serving => rust}/numaflow-models/Makefile | 0 .../numaflow-models/hack/swaggerfilter.py | 0 .../numaflow-models/src/apis/configuration.rs | 0 .../numaflow-models/src/apis/mod.rs | 0 {serving => rust}/numaflow-models/src/lib.rs | 0 .../src/models/abstract_pod_template.rs | 0 .../src/models/abstract_sink.rs | 0 .../src/models/abstract_vertex.rs | 0 .../src/models/authorization.rs | 0 .../numaflow-models/src/models/basic_auth.rs | 0 .../numaflow-models/src/models/blackhole.rs | 0 .../src/models/buffer_service_config.rs | 0 .../src/models/combined_edge.rs | 0 .../numaflow-models/src/models/container.rs | 0 .../src/models/container_builder.rs | 0 .../src/models/container_template.rs | 0 .../src/models/daemon_template.rs | 0 .../numaflow-models/src/models/edge.rs | 0 .../src/models/fixed_window.rs | 0 .../src/models/forward_conditions.rs | 0 .../numaflow-models/src/models/function.rs | 0 .../src/models/generator_source.rs | 0 .../src/models/get_container_req.rs | 0 .../src/models/get_daemon_deployment_req.rs | 0 .../models/get_jet_stream_service_spec_req.rs | 0 .../get_jet_stream_stateful_set_spec_req.rs | 0 .../get_mono_vertex_daemon_deployment_req.rs | 0 .../models/get_mono_vertex_pod_spec_req.rs | 0 .../src/models/get_redis_service_spec_req.rs | 0 .../models/get_redis_stateful_set_spec_req.rs | 0 .../models/get_side_input_deployment_req.rs | 0 .../src/models/get_vertex_pod_spec_req.rs | 0 .../numaflow-models/src/models/group_by.rs | 0 .../numaflow-models/src/models/gssapi.rs | 0 .../numaflow-models/src/models/http_source.rs | 0 .../numaflow-models/src/models/idle_source.rs | 0 .../src/models/inter_step_buffer_service.rs | 0 .../models/inter_step_buffer_service_list.rs | 0 .../models/inter_step_buffer_service_spec.rs | 0 .../inter_step_buffer_service_status.rs | 0 
.../src/models/jet_stream_buffer_service.rs | 0 .../src/models/jet_stream_config.rs | 0 .../src/models/jet_stream_source.rs | 0 .../src/models/job_template.rs | 0 .../numaflow-models/src/models/kafka_sink.rs | 0 .../src/models/kafka_source.rs | 0 .../numaflow-models/src/models/lifecycle.rs | 0 .../numaflow-models/src/models/log.rs | 0 .../numaflow-models/src/models/metadata.rs | 0 .../numaflow-models/src/models/mod.rs | 0 .../numaflow-models/src/models/mono_vertex.rs | 0 .../src/models/mono_vertex_limits.rs | 0 .../src/models/mono_vertex_list.rs | 0 .../src/models/mono_vertex_spec.rs | 0 .../src/models/mono_vertex_status.rs | 0 .../src/models/native_redis.rs | 0 .../numaflow-models/src/models/nats_auth.rs | 0 .../numaflow-models/src/models/nats_source.rs | 0 .../numaflow-models/src/models/no_store.rs | 0 .../numaflow-models/src/models/pbq_storage.rs | 0 .../src/models/persistence_strategy.rs | 0 .../numaflow-models/src/models/pipeline.rs | 0 .../src/models/pipeline_limits.rs | 0 .../src/models/pipeline_list.rs | 0 .../src/models/pipeline_spec.rs | 0 .../src/models/pipeline_status.rs | 0 .../src/models/redis_buffer_service.rs | 0 .../src/models/redis_config.rs | 0 .../src/models/redis_settings.rs | 0 .../numaflow-models/src/models/sasl.rs | 0 .../numaflow-models/src/models/sasl_plain.rs | 0 .../numaflow-models/src/models/scale.rs | 0 .../src/models/serving_source.rs | 0 .../src/models/serving_store.rs | 0 .../src/models/session_window.rs | 0 .../numaflow-models/src/models/side_input.rs | 0 .../src/models/side_input_trigger.rs | 0 .../models/side_inputs_manager_template.rs | 0 .../numaflow-models/src/models/sink.rs | 0 .../src/models/sliding_window.rs | 0 .../numaflow-models/src/models/source.rs | 0 .../numaflow-models/src/models/status.rs | 0 .../src/models/tag_conditions.rs | 0 .../numaflow-models/src/models/templates.rs | 0 .../numaflow-models/src/models/tls.rs | 0 .../numaflow-models/src/models/transformer.rs | 0 .../numaflow-models/src/models/ud_sink.rs | 0 
.../numaflow-models/src/models/ud_source.rs | 0 .../src/models/ud_transformer.rs | 0 .../numaflow-models/src/models/udf.rs | 0 .../numaflow-models/src/models/vertex.rs | 0 .../src/models/vertex_instance.rs | 0 .../src/models/vertex_limits.rs | 0 .../numaflow-models/src/models/vertex_list.rs | 0 .../numaflow-models/src/models/vertex_spec.rs | 0 .../src/models/vertex_status.rs | 0 .../src/models/vertex_template.rs | 0 .../numaflow-models/src/models/watermark.rs | 0 .../numaflow-models/src/models/window.rs | 0 .../numaflow-models/templates/Cargo.mustache | 0 .../templates/partial_header.mustache | 0 {serving => rust}/servesink/.dockerignore | 0 {serving => rust}/servesink/Cargo.toml | 4 - {serving => rust}/servesink/Dockerfile | 0 .../src/main.rs => rust/servesink/src/lib.rs | 3 +- {serving => rust/serving}/Cargo.toml | 6 +- rust/serving/README.md | 0 {serving => rust/serving}/config/default.toml | 0 .../serving}/config/jetstream.conf | 0 .../serving}/config/pipeline_spec.json | 0 {serving => rust/serving}/src/app.rs | 0 {serving => rust/serving}/src/app/callback.rs | 0 .../serving}/src/app/callback/state.rs | 0 .../serving}/src/app/callback/store.rs | 0 .../src/app/callback/store/memstore.rs | 0 .../src/app/callback/store/redisstore.rs | 0 .../serving}/src/app/direct_proxy.rs | 0 .../serving}/src/app/jetstream_proxy.rs | 4 +- .../serving}/src/app/message_path.rs | 0 {serving => rust/serving}/src/app/response.rs | 0 {serving => rust/serving}/src/app/tracker.rs | 0 {serving => rust/serving}/src/config.rs | 0 {serving => rust/serving}/src/consts.rs | 0 {serving => rust/serving}/src/error.rs | 0 .../src/main.rs => rust/serving/src/lib.rs | 18 ++- {serving => rust/serving}/src/metrics.rs | 0 {serving => rust/serving}/src/pipeline.rs | 0 rust/src/bin/main.rs | 20 +++ serving/extras/upstreams/Cargo.toml | 20 --- .../extras/upstreams/src/bin/simple_proxy.rs | 94 ------------- serving/source-sink/Dockerfile | 19 --- serving/source-sink/src/main.rs | 89 ------------ 166 files 
changed, 310 insertions(+), 385 deletions(-) rename {serving => rust}/.dockerignore (100%) rename {serving => rust}/.rustfmt.toml (100%) rename {serving => rust}/Cargo.lock (99%) create mode 100644 rust/Cargo.toml rename {serving => rust}/Dockerfile (58%) rename {serving => rust}/Makefile (100%) rename {serving => rust}/README.md (100%) rename {serving => rust}/backoff/Cargo.toml (100%) rename {serving => rust}/backoff/src/lib.rs (100%) rename {serving => rust}/backoff/src/retry.rs (100%) rename {serving => rust}/backoff/src/strategy.rs (100%) rename {serving => rust}/backoff/src/strategy/fixed.rs (100%) rename {serving/source-sink => rust/monovertex}/Cargo.toml (95%) rename {serving/source-sink => rust/monovertex}/build.rs (100%) rename {serving/source-sink => rust/monovertex}/proto/sink.proto (100%) rename {serving/source-sink => rust/monovertex}/proto/source.proto (100%) rename {serving/source-sink => rust/monovertex}/proto/sourcetransform.proto (100%) rename {serving/source-sink => rust/monovertex}/src/config.rs (100%) rename {serving/source-sink => rust/monovertex}/src/error.rs (100%) rename {serving/source-sink => rust/monovertex}/src/forwarder.rs (100%) rename {serving/source-sink => rust/monovertex}/src/lib.rs (79%) rename {serving/source-sink => rust/monovertex}/src/message.rs (100%) rename {serving/source-sink => rust/monovertex}/src/metrics.rs (100%) rename {serving/source-sink => rust/monovertex}/src/server_info.rs (100%) rename {serving/source-sink => rust/monovertex}/src/shared.rs (100%) rename {serving/source-sink => rust/monovertex}/src/sink.rs (91%) rename {serving/source-sink => rust/monovertex}/src/source.rs (94%) rename {serving/source-sink => rust/monovertex}/src/transformer.rs (91%) rename {serving => rust}/numaflow-models/Cargo.toml (100%) rename {serving => rust}/numaflow-models/Makefile (100%) rename {serving => rust}/numaflow-models/hack/swaggerfilter.py (100%) rename {serving => rust}/numaflow-models/src/apis/configuration.rs (100%) 
rename {serving => rust}/numaflow-models/src/apis/mod.rs (100%) rename {serving => rust}/numaflow-models/src/lib.rs (100%) rename {serving => rust}/numaflow-models/src/models/abstract_pod_template.rs (100%) rename {serving => rust}/numaflow-models/src/models/abstract_sink.rs (100%) rename {serving => rust}/numaflow-models/src/models/abstract_vertex.rs (100%) rename {serving => rust}/numaflow-models/src/models/authorization.rs (100%) rename {serving => rust}/numaflow-models/src/models/basic_auth.rs (100%) rename {serving => rust}/numaflow-models/src/models/blackhole.rs (100%) rename {serving => rust}/numaflow-models/src/models/buffer_service_config.rs (100%) rename {serving => rust}/numaflow-models/src/models/combined_edge.rs (100%) rename {serving => rust}/numaflow-models/src/models/container.rs (100%) rename {serving => rust}/numaflow-models/src/models/container_builder.rs (100%) rename {serving => rust}/numaflow-models/src/models/container_template.rs (100%) rename {serving => rust}/numaflow-models/src/models/daemon_template.rs (100%) rename {serving => rust}/numaflow-models/src/models/edge.rs (100%) rename {serving => rust}/numaflow-models/src/models/fixed_window.rs (100%) rename {serving => rust}/numaflow-models/src/models/forward_conditions.rs (100%) rename {serving => rust}/numaflow-models/src/models/function.rs (100%) rename {serving => rust}/numaflow-models/src/models/generator_source.rs (100%) rename {serving => rust}/numaflow-models/src/models/get_container_req.rs (100%) rename {serving => rust}/numaflow-models/src/models/get_daemon_deployment_req.rs (100%) rename {serving => rust}/numaflow-models/src/models/get_jet_stream_service_spec_req.rs (100%) rename {serving => rust}/numaflow-models/src/models/get_jet_stream_stateful_set_spec_req.rs (100%) rename {serving => rust}/numaflow-models/src/models/get_mono_vertex_daemon_deployment_req.rs (100%) rename {serving => rust}/numaflow-models/src/models/get_mono_vertex_pod_spec_req.rs (100%) rename {serving => 
rust}/numaflow-models/src/models/get_redis_service_spec_req.rs (100%) rename {serving => rust}/numaflow-models/src/models/get_redis_stateful_set_spec_req.rs (100%) rename {serving => rust}/numaflow-models/src/models/get_side_input_deployment_req.rs (100%) rename {serving => rust}/numaflow-models/src/models/get_vertex_pod_spec_req.rs (100%) rename {serving => rust}/numaflow-models/src/models/group_by.rs (100%) rename {serving => rust}/numaflow-models/src/models/gssapi.rs (100%) rename {serving => rust}/numaflow-models/src/models/http_source.rs (100%) rename {serving => rust}/numaflow-models/src/models/idle_source.rs (100%) rename {serving => rust}/numaflow-models/src/models/inter_step_buffer_service.rs (100%) rename {serving => rust}/numaflow-models/src/models/inter_step_buffer_service_list.rs (100%) rename {serving => rust}/numaflow-models/src/models/inter_step_buffer_service_spec.rs (100%) rename {serving => rust}/numaflow-models/src/models/inter_step_buffer_service_status.rs (100%) rename {serving => rust}/numaflow-models/src/models/jet_stream_buffer_service.rs (100%) rename {serving => rust}/numaflow-models/src/models/jet_stream_config.rs (100%) rename {serving => rust}/numaflow-models/src/models/jet_stream_source.rs (100%) rename {serving => rust}/numaflow-models/src/models/job_template.rs (100%) rename {serving => rust}/numaflow-models/src/models/kafka_sink.rs (100%) rename {serving => rust}/numaflow-models/src/models/kafka_source.rs (100%) rename {serving => rust}/numaflow-models/src/models/lifecycle.rs (100%) rename {serving => rust}/numaflow-models/src/models/log.rs (100%) rename {serving => rust}/numaflow-models/src/models/metadata.rs (100%) rename {serving => rust}/numaflow-models/src/models/mod.rs (100%) rename {serving => rust}/numaflow-models/src/models/mono_vertex.rs (100%) rename {serving => rust}/numaflow-models/src/models/mono_vertex_limits.rs (100%) rename {serving => rust}/numaflow-models/src/models/mono_vertex_list.rs (100%) rename {serving => 
rust}/numaflow-models/src/models/mono_vertex_spec.rs (100%) rename {serving => rust}/numaflow-models/src/models/mono_vertex_status.rs (100%) rename {serving => rust}/numaflow-models/src/models/native_redis.rs (100%) rename {serving => rust}/numaflow-models/src/models/nats_auth.rs (100%) rename {serving => rust}/numaflow-models/src/models/nats_source.rs (100%) rename {serving => rust}/numaflow-models/src/models/no_store.rs (100%) rename {serving => rust}/numaflow-models/src/models/pbq_storage.rs (100%) rename {serving => rust}/numaflow-models/src/models/persistence_strategy.rs (100%) rename {serving => rust}/numaflow-models/src/models/pipeline.rs (100%) rename {serving => rust}/numaflow-models/src/models/pipeline_limits.rs (100%) rename {serving => rust}/numaflow-models/src/models/pipeline_list.rs (100%) rename {serving => rust}/numaflow-models/src/models/pipeline_spec.rs (100%) rename {serving => rust}/numaflow-models/src/models/pipeline_status.rs (100%) rename {serving => rust}/numaflow-models/src/models/redis_buffer_service.rs (100%) rename {serving => rust}/numaflow-models/src/models/redis_config.rs (100%) rename {serving => rust}/numaflow-models/src/models/redis_settings.rs (100%) rename {serving => rust}/numaflow-models/src/models/sasl.rs (100%) rename {serving => rust}/numaflow-models/src/models/sasl_plain.rs (100%) rename {serving => rust}/numaflow-models/src/models/scale.rs (100%) rename {serving => rust}/numaflow-models/src/models/serving_source.rs (100%) rename {serving => rust}/numaflow-models/src/models/serving_store.rs (100%) rename {serving => rust}/numaflow-models/src/models/session_window.rs (100%) rename {serving => rust}/numaflow-models/src/models/side_input.rs (100%) rename {serving => rust}/numaflow-models/src/models/side_input_trigger.rs (100%) rename {serving => rust}/numaflow-models/src/models/side_inputs_manager_template.rs (100%) rename {serving => rust}/numaflow-models/src/models/sink.rs (100%) rename {serving => 
rust}/numaflow-models/src/models/sliding_window.rs (100%) rename {serving => rust}/numaflow-models/src/models/source.rs (100%) rename {serving => rust}/numaflow-models/src/models/status.rs (100%) rename {serving => rust}/numaflow-models/src/models/tag_conditions.rs (100%) rename {serving => rust}/numaflow-models/src/models/templates.rs (100%) rename {serving => rust}/numaflow-models/src/models/tls.rs (100%) rename {serving => rust}/numaflow-models/src/models/transformer.rs (100%) rename {serving => rust}/numaflow-models/src/models/ud_sink.rs (100%) rename {serving => rust}/numaflow-models/src/models/ud_source.rs (100%) rename {serving => rust}/numaflow-models/src/models/ud_transformer.rs (100%) rename {serving => rust}/numaflow-models/src/models/udf.rs (100%) rename {serving => rust}/numaflow-models/src/models/vertex.rs (100%) rename {serving => rust}/numaflow-models/src/models/vertex_instance.rs (100%) rename {serving => rust}/numaflow-models/src/models/vertex_limits.rs (100%) rename {serving => rust}/numaflow-models/src/models/vertex_list.rs (100%) rename {serving => rust}/numaflow-models/src/models/vertex_spec.rs (100%) rename {serving => rust}/numaflow-models/src/models/vertex_status.rs (100%) rename {serving => rust}/numaflow-models/src/models/vertex_template.rs (100%) rename {serving => rust}/numaflow-models/src/models/watermark.rs (100%) rename {serving => rust}/numaflow-models/src/models/window.rs (100%) rename {serving => rust}/numaflow-models/templates/Cargo.mustache (100%) rename {serving => rust}/numaflow-models/templates/partial_header.mustache (100%) rename {serving => rust}/servesink/.dockerignore (100%) rename {serving => rust}/servesink/Cargo.toml (88%) rename {serving => rust}/servesink/Dockerfile (100%) rename serving/servesink/src/main.rs => rust/servesink/src/lib.rs (96%) rename {serving => rust/serving}/Cargo.toml (86%) create mode 100644 rust/serving/README.md rename {serving => rust/serving}/config/default.toml (100%) rename {serving => 
rust/serving}/config/jetstream.conf (100%) rename {serving => rust/serving}/config/pipeline_spec.json (100%) rename {serving => rust/serving}/src/app.rs (100%) rename {serving => rust/serving}/src/app/callback.rs (100%) rename {serving => rust/serving}/src/app/callback/state.rs (100%) rename {serving => rust/serving}/src/app/callback/store.rs (100%) rename {serving => rust/serving}/src/app/callback/store/memstore.rs (100%) rename {serving => rust/serving}/src/app/callback/store/redisstore.rs (100%) rename {serving => rust/serving}/src/app/direct_proxy.rs (100%) rename {serving => rust/serving}/src/app/jetstream_proxy.rs (98%) rename {serving => rust/serving}/src/app/message_path.rs (100%) rename {serving => rust/serving}/src/app/response.rs (100%) rename {serving => rust/serving}/src/app/tracker.rs (100%) rename {serving => rust/serving}/src/config.rs (100%) rename {serving => rust/serving}/src/consts.rs (100%) rename {serving => rust/serving}/src/error.rs (100%) rename serving/src/main.rs => rust/serving/src/lib.rs (87%) rename {serving => rust/serving}/src/metrics.rs (100%) rename {serving => rust/serving}/src/pipeline.rs (100%) create mode 100644 rust/src/bin/main.rs delete mode 100644 serving/extras/upstreams/Cargo.toml delete mode 100644 serving/extras/upstreams/src/bin/simple_proxy.rs delete mode 100644 serving/source-sink/Dockerfile delete mode 100644 serving/source-sink/src/main.rs diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 5c476f088e..064afdb38c 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -107,16 +107,19 @@ jobs: - name: Install grcov run: cargo install grcov + - name: Install Protobuf Compiler + run: sudo apt-get install -y protobuf-compiler + - name: Test Rust - working-directory: ./serving + working-directory: ./rust run: | - CARGO_INCREMENTAL=0 RUSTFLAGS='-Cinstrument-coverage' LLVM_PROFILE_FILE='./target/debug/coverage/cargo-test-%p-%m.profraw' cargo test --all-features + CARGO_INCREMENTAL=0 
RUSTFLAGS='-Cinstrument-coverage' LLVM_PROFILE_FILE='./target/debug/coverage/cargo-test-%p-%m.profraw' cargo test --all-features --workspace --all grcov . -s ./target/debug/coverage/ --binary-path ./target/debug/ -t lcov --branch --ignore-not-existing -o ./target/debug/coverage/lcov.info - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v4 with: - files: ./test/profile.cov,./serving/target/debug/coverage/lcov.info + files: ./test/profile.cov,./rust/target/debug/coverage/lcov.info env: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} diff --git a/Dockerfile b/Dockerfile index 350a9fe4e0..7796a35b81 100644 --- a/Dockerfile +++ b/Dockerfile @@ -20,42 +20,44 @@ RUN curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/ca RUN apt-get update RUN apt-get install protobuf-compiler -y -RUN cargo new serve +RUN cargo new numaflow # Create a new empty shell project -WORKDIR /serve -RUN cargo new servesink -COPY ./serving/servesink/Cargo.toml ./servesink/ +WORKDIR /numaflow -RUN cargo new extras/upstreams -COPY ./serving/extras/upstreams/Cargo.toml ./extras/upstreams/ +RUN cargo new servesink +COPY ./rust/servesink/Cargo.toml ./servesink/ RUN cargo new backoff -COPY ./serving/backoff/Cargo.toml ./backoff/ +COPY ./rust/backoff/Cargo.toml ./backoff/ RUN cargo new numaflow-models -COPY ./serving/numaflow-models/Cargo.toml ./numaflow-models/ +COPY ./rust/numaflow-models/Cargo.toml ./numaflow-models/ + +RUN cargo new monovertex +COPY ./rust/monovertex/Cargo.toml ./monovertex/ -RUN cargo new source-sink -COPY ./serving/source-sink/Cargo.toml ./source-sink/ +RUN cargo new serving +COPY ./rust/serving/Cargo.toml ./serving/Cargo.toml # Copy all Cargo.toml and Cargo.lock files for caching dependencies -COPY ./serving/Cargo.toml ./serving/Cargo.lock ./ +COPY ./rust/Cargo.toml ./rust/Cargo.lock ./ -# Build only the dependencies to cache them -RUN cargo build --release +# Build to cache dependencies +RUN mkdir -p src/bin && echo "fn main() {}" > 
src/bin/main.rs && \ + cargo build --workspace --all --release # Copy the actual source code files of the main project and the subprojects -COPY ./serving/src ./src -COPY ./serving/servesink/src ./servesink/src -COPY ./serving/extras/upstreams/src ./extras/upstreams/src -COPY ./serving/backoff/src ./backoff/src -COPY ./serving/numaflow-models/src ./numaflow-models/src -COPY ./serving/source-sink/src ./source-sink/src -COPY ./serving/source-sink/build.rs ./source-sink/build.rs -COPY ./serving/source-sink/proto ./source-sink/proto +COPY ./rust/src ./src +COPY ./rust/servesink/src ./servesink/src +COPY ./rust/backoff/src ./backoff/src +COPY ./rust/numaflow-models/src ./numaflow-models/src +COPY ./rust/serving/src ./serving/src +COPY ./rust/monovertex/src ./monovertex/src +COPY ./rust/monovertex/build.rs ./monovertex/build.rs +COPY ./rust/monovertex/proto ./monovertex/proto # Build the real binaries -RUN touch src/main.rs servesink/src/main.rs numaflow-models/src/main.rs source-sink/src/main.rs && \ +RUN touch src/bin/main.rs && \ cargo build --workspace --all --release #################################################################################################### @@ -70,10 +72,8 @@ RUN apt-get update && apt-get install -y libssl3 COPY --from=base /bin/numaflow /bin/numaflow COPY ui/build /ui/build -COPY --from=extension-base /serve/target/release/serve /bin/serve -COPY --from=extension-base /serve/target/release/sourcer-sinker /bin/sourcer-sinker - -COPY ./serving/config config +COPY --from=extension-base /numaflow/target/release/numaflow /bin/numaflow-rs +COPY ./rust/serving/config config ENTRYPOINT [ "/bin/numaflow" ] diff --git a/Makefile b/Makefile index 1c11b01583..2c6e30d68c 100644 --- a/Makefile +++ b/Makefile @@ -195,7 +195,7 @@ codegen: $(MAKE) manifests rm -rf ./vendor go mod tidy - $(MAKE) --directory serving/numaflow-models generate + $(MAKE) --directory rust/numaflow-models generate clean: -rm -rf ${CURRENT_DIR}/dist diff --git 
a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go index 7dd39fd2e8..b4a372ae45 100644 --- a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go @@ -403,7 +403,7 @@ func (mvspec MonoVertexSpec) DeepCopyWithoutReplicas() MonoVertexSpec { func (mvspec MonoVertexSpec) buildContainers(req getContainerReq) []corev1.Container { mainContainer := containerBuilder{}. - init(req).command(MonoVertexBinary).build() + init(req).command(NumaflowRustBinary).args("--monovertex").build() containers := []corev1.Container{mainContainer} if mvspec.Source.UDSource != nil { // Only support UDSource for now. diff --git a/pkg/apis/numaflow/v1alpha1/vertex_types.go b/pkg/apis/numaflow/v1alpha1/vertex_types.go index 71cd9c7171..b4a26f4815 100644 --- a/pkg/apis/numaflow/v1alpha1/vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/vertex_types.go @@ -50,8 +50,7 @@ const ( VertexTypeReduceUDF VertexType = "ReduceUDF" ) -const ServingBinary = "/bin/serve" -const MonoVertexBinary = "/bin/sourcer-sinker" +const NumaflowRustBinary = "/bin/numaflow-rs" // +genclient // +kubebuilder:object:root=true @@ -350,8 +349,9 @@ func (v Vertex) getServingContainer(req GetVertexPodSpecReq) (corev1.Container, Env: req.Env, Image: req.Image, ImagePullPolicy: req.PullPolicy, - Command: []string{ServingBinary}, // we use the same image, but we execute the extension binary + Command: []string{NumaflowRustBinary}, // we use the same image, but we execute the extension binary Resources: req.DefaultResources, + Args: []string{"--serving"}, } // set the common envs diff --git a/serving/.dockerignore b/rust/.dockerignore similarity index 100% rename from serving/.dockerignore rename to rust/.dockerignore diff --git a/serving/.rustfmt.toml b/rust/.rustfmt.toml similarity index 100% rename from serving/.rustfmt.toml rename to rust/.rustfmt.toml diff --git a/serving/Cargo.lock b/rust/Cargo.lock similarity index 99% rename from 
serving/Cargo.lock rename to rust/Cargo.lock index 30fd1a14db..db2f0404bd 100644 --- a/serving/Cargo.lock +++ b/rust/Cargo.lock @@ -1555,6 +1555,44 @@ version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9be0862c1b3f26a88803c4a49de6889c10e608b3ee9344e6ef5b45fb37ad3d1" +[[package]] +name = "monovertex" +version = "0.1.0" +dependencies = [ + "axum", + "axum-server", + "backoff", + "base64 0.22.1", + "bytes", + "chrono", + "hyper-util", + "metrics", + "metrics-exporter-prometheus", + "numaflow 0.1.0 (git+https://github.com/numaproj/numaflow-rs.git?branch=main)", + "numaflow-models", + "once_cell", + "pep440_rs", + "prost", + "prost-types", + "rcgen", + "rustls", + "semver", + "serde", + "serde_json", + "tempfile", + "thiserror", + "tokio", + "tokio-stream", + "tokio-util", + "tonic", + "tonic-build", + "tower", + "tracing", + "tracing-subscriber", + "trait-variant", + "uuid", +] + [[package]] name = "multimap" version = "0.10.0" @@ -1666,6 +1704,18 @@ dependencies = [ "libc", ] +[[package]] +name = "numaflow" +version = "0.1.0" +dependencies = [ + "backoff", + "monovertex", + "servesink", + "serving", + "tokio", + "tracing", +] + [[package]] name = "numaflow" version = "0.1.0" @@ -2601,7 +2651,19 @@ dependencies = [ ] [[package]] -name = "serve" +name = "servesink" +version = "0.1.0" +dependencies = [ + "numaflow 0.1.0 (git+https://github.com/numaproj/numaflow-rs.git?branch=main)", + "reqwest 0.12.5", + "tokio", + "tonic", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "serving" version = "0.1.0" dependencies = [ "async-nats", @@ -2627,18 +2689,6 @@ dependencies = [ "uuid", ] -[[package]] -name = "servesink" -version = "0.1.0" -dependencies = [ - "numaflow", - "reqwest 0.12.5", - "tokio", - "tonic", - "tracing", - "tracing-subscriber", -] - [[package]] name = "sha1" version = "0.10.6" @@ -2744,43 +2794,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "sourcer-sinker" -version = "0.1.0" 
-dependencies = [ - "axum", - "axum-server", - "base64 0.22.1", - "bytes", - "chrono", - "hyper-util", - "metrics", - "metrics-exporter-prometheus", - "numaflow", - "numaflow-models", - "once_cell", - "pep440_rs", - "prost", - "prost-types", - "rcgen", - "rustls", - "semver", - "serde", - "serde_json", - "tempfile", - "thiserror", - "tokio", - "tokio-stream", - "tokio-util", - "tonic", - "tonic-build", - "tower", - "tracing", - "tracing-subscriber", - "trait-variant", - "uuid", -] - [[package]] name = "spin" version = "0.9.8" @@ -3321,22 +3334,6 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" -[[package]] -name = "upstreams" -version = "0.1.0" -dependencies = [ - "axum", - "axum-macros", - "http-body-util", - "serde", - "serde_json", - "tokio", - "tower", - "tower-http", - "tracing", - "tracing-subscriber", -] - [[package]] name = "url" version = "2.5.2" diff --git a/rust/Cargo.toml b/rust/Cargo.toml new file mode 100644 index 0000000000..45a630b732 --- /dev/null +++ b/rust/Cargo.toml @@ -0,0 +1,19 @@ +workspace = { members = ["backoff", "numaflow-models", "servesink", "serving", "monovertex"] } + +[[bin]] +name = "numaflow" +path = "src/bin/main.rs" + +[package] +name = "numaflow" +version = "0.1.0" +edition = "2021" + + +[dependencies] +tokio = "1.39.2" +backoff = { path = "backoff" } +servesink = { path = "servesink" } +serving = { path = "serving" } +monovertex = { path = "monovertex" } +tracing = "0.1.40" diff --git a/serving/Dockerfile b/rust/Dockerfile similarity index 58% rename from serving/Dockerfile rename to rust/Dockerfile index 697cfd6f27..e680b85eac 100644 --- a/serving/Dockerfile +++ b/rust/Dockerfile @@ -7,42 +7,44 @@ RUN curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/ca RUN apt-get update RUN apt-get install protobuf-compiler -y -RUN cargo new serve +RUN cargo new numaflow # Create a new empty shell project 
-WORKDIR /serve -RUN cargo new servesink -COPY ./servesink/Cargo.toml ./servesink/ +WORKDIR /numaflow -RUN cargo new extras/upstreams -COPY ./extras/upstreams/Cargo.toml ./extras/upstreams/ +RUN cargo new servesink +COPY ./servesink/Cargo.toml ./servesink/Cargo.toml RUN cargo new backoff COPY ./backoff/Cargo.toml ./backoff/Cargo.toml RUN cargo new numaflow-models -COPY ./numaflow-models/Cargo.toml ./numaflow-models/ +COPY ./numaflow-models/Cargo.toml ./numaflow-models/Cargo.toml -RUN cargo new source-sink -COPY ./source-sink/Cargo.toml ./source-sink/Cargo.toml +RUN cargo new monovertex +COPY monovertex/Cargo.toml ./monovertex/Cargo.toml + +RUN cargo new serving +COPY ./serving/Cargo.toml ./serving/Cargo.toml # Copy all Cargo.toml and Cargo.lock files for caching dependencies COPY ./Cargo.toml ./Cargo.lock ./ -# Build only the dependencies to cache them -RUN cargo build --release +# Build to cache dependencies +RUN mkdir -p src/bin && echo "fn main() {}" > src/bin/main.rs && \ + cargo build --workspace --all --release # Copy the actual source code files of the main project and the subprojects COPY ./src ./src COPY ./servesink/src ./servesink/src -COPY ./extras/upstreams/src ./extras/upstreams/src COPY ./backoff/src ./backoff/src COPY ./numaflow-models/src ./numaflow-models/src -COPY ./source-sink/src ./source-sink/src -COPY ./source-sink/build.rs ./source-sink/build.rs -COPY ./source-sink/proto ./source-sink/proto +COPY ./serving/src ./serving/src +COPY monovertex/src ./monovertex/src +COPY monovertex/build.rs ./monovertex/build.rs +COPY monovertex/proto ./monovertex/proto # Build the real binaries -RUN touch src/main.rs servesink/src/main.rs numaflow-models/src/main.rs source-sink/src/main.rs && \ +RUN touch src/bin/main.rs && \ cargo build --workspace --all --release # Use a lightweight image for the runtime @@ -50,7 +52,7 @@ FROM debian:bookworm as numaflow-ext RUN apt-get update && apt-get install -y libssl3 -COPY --from=builder /serve/target/release/ . 
-COPY ./config config +COPY --from=builder /numaflow/target/release/ . +COPY serving/config config -ENTRYPOINT ["./serve"] \ No newline at end of file +ENTRYPOINT ["./numaflow"] \ No newline at end of file diff --git a/serving/Makefile b/rust/Makefile similarity index 100% rename from serving/Makefile rename to rust/Makefile diff --git a/serving/README.md b/rust/README.md similarity index 100% rename from serving/README.md rename to rust/README.md diff --git a/serving/backoff/Cargo.toml b/rust/backoff/Cargo.toml similarity index 100% rename from serving/backoff/Cargo.toml rename to rust/backoff/Cargo.toml diff --git a/serving/backoff/src/lib.rs b/rust/backoff/src/lib.rs similarity index 100% rename from serving/backoff/src/lib.rs rename to rust/backoff/src/lib.rs diff --git a/serving/backoff/src/retry.rs b/rust/backoff/src/retry.rs similarity index 100% rename from serving/backoff/src/retry.rs rename to rust/backoff/src/retry.rs diff --git a/serving/backoff/src/strategy.rs b/rust/backoff/src/strategy.rs similarity index 100% rename from serving/backoff/src/strategy.rs rename to rust/backoff/src/strategy.rs diff --git a/serving/backoff/src/strategy/fixed.rs b/rust/backoff/src/strategy/fixed.rs similarity index 100% rename from serving/backoff/src/strategy/fixed.rs rename to rust/backoff/src/strategy/fixed.rs diff --git a/serving/source-sink/Cargo.toml b/rust/monovertex/Cargo.toml similarity index 95% rename from serving/source-sink/Cargo.toml rename to rust/monovertex/Cargo.toml index 9813d52b1e..3e98b10d69 100644 --- a/serving/source-sink/Cargo.toml +++ b/rust/monovertex/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "sourcer-sinker" +name = "monovertex" version = "0.1.0" edition = "2021" @@ -32,6 +32,7 @@ rustls = { version = "0.23.12", features = ["aws_lc_rs"] } serde = { version = "1.0.204", features = ["derive"] } semver = "1.0" pep440_rs = "0.6.6" +backoff = { path = "../backoff" } [dev-dependencies] tower = "0.4.13" diff --git a/serving/source-sink/build.rs 
b/rust/monovertex/build.rs similarity index 100% rename from serving/source-sink/build.rs rename to rust/monovertex/build.rs diff --git a/serving/source-sink/proto/sink.proto b/rust/monovertex/proto/sink.proto similarity index 100% rename from serving/source-sink/proto/sink.proto rename to rust/monovertex/proto/sink.proto diff --git a/serving/source-sink/proto/source.proto b/rust/monovertex/proto/source.proto similarity index 100% rename from serving/source-sink/proto/source.proto rename to rust/monovertex/proto/source.proto diff --git a/serving/source-sink/proto/sourcetransform.proto b/rust/monovertex/proto/sourcetransform.proto similarity index 100% rename from serving/source-sink/proto/sourcetransform.proto rename to rust/monovertex/proto/sourcetransform.proto diff --git a/serving/source-sink/src/config.rs b/rust/monovertex/src/config.rs similarity index 100% rename from serving/source-sink/src/config.rs rename to rust/monovertex/src/config.rs diff --git a/serving/source-sink/src/error.rs b/rust/monovertex/src/error.rs similarity index 100% rename from serving/source-sink/src/error.rs rename to rust/monovertex/src/error.rs diff --git a/serving/source-sink/src/forwarder.rs b/rust/monovertex/src/forwarder.rs similarity index 100% rename from serving/source-sink/src/forwarder.rs rename to rust/monovertex/src/forwarder.rs diff --git a/serving/source-sink/src/lib.rs b/rust/monovertex/src/lib.rs similarity index 79% rename from serving/source-sink/src/lib.rs rename to rust/monovertex/src/lib.rs index 15b59e537f..88f7bb9c19 100644 --- a/serving/source-sink/src/lib.rs +++ b/rust/monovertex/src/lib.rs @@ -1,6 +1,4 @@ -use std::net::SocketAddr; -use std::time::Duration; - +pub(crate) use self::error::Result; use crate::config::config; pub(crate) use crate::error::Error; use crate::forwarder::Forwarder; @@ -8,11 +6,15 @@ use crate::metrics::{start_metrics_https_server, MetricsState}; use crate::sink::{SinkClient, SinkConfig}; use crate::source::{SourceClient, 
SourceConfig}; use crate::transformer::{TransformerClient, TransformerConfig}; +use std::net::SocketAddr; +use std::time::Duration; +use tokio::signal; +use tokio::task::JoinHandle; use tokio::time::sleep; use tokio_util::sync::CancellationToken; +use tracing::level_filters::LevelFilter; use tracing::{error, info, warn}; - -pub(crate) use self::error::Result; +use tracing_subscriber::EnvFilter; /// SourcerSinker orchestrates data movement from the Source to the Sink via the optional SourceTransformer. /// The forward-a-chunk executes the following in an infinite loop till a shutdown signal is received: @@ -38,6 +40,82 @@ pub mod message; mod server_info; pub(crate) mod shared; +pub async fn mono_vertex() { + // Initialize the logger + tracing_subscriber::fmt() + .with_env_filter( + EnvFilter::builder() + .with_default_directive(LevelFilter::INFO.into()) + .parse_lossy(&config().log_level), + ) + .with_target(false) + .init(); + + // Initialize the source, sink and transformer configurations + // We are using the default configurations for now. + let source_config = SourceConfig { + max_message_size: config().grpc_max_message_size, + ..Default::default() + }; + + let sink_config = SinkConfig { + max_message_size: config().grpc_max_message_size, + ..Default::default() + }; + + let transformer_config = if config().is_transformer_enabled { + Some(TransformerConfig { + max_message_size: config().grpc_max_message_size, + ..Default::default() + }) + } else { + None + }; + + let cln_token = CancellationToken::new(); + let shutdown_cln_token = cln_token.clone(); + // wait for SIG{INT,TERM} and invoke cancellation token. + let shutdown_handle: JoinHandle> = tokio::spawn(async move { + shutdown_signal().await; + shutdown_cln_token.cancel(); + Ok(()) + }); + + // Run the forwarder with cancellation token. 
+ if let Err(e) = init(source_config, sink_config, transformer_config, cln_token).await { + error!("Application error: {:?}", e); + + // abort the task since we have an error + if !shutdown_handle.is_finished() { + shutdown_handle.abort(); + } + } + + info!("Gracefully Exiting..."); +} + +async fn shutdown_signal() { + let ctrl_c = async { + signal::ctrl_c() + .await + .expect("failed to install Ctrl+C handler"); + info!("Received Ctrl+C signal"); + }; + + let terminate = async { + signal::unix::signal(signal::unix::SignalKind::terminate()) + .expect("failed to install signal handler") + .recv() + .await; + info!("Received terminate signal"); + }; + + tokio::select! { + _ = ctrl_c => {}, + _ = terminate => {}, + } +} + /// forwards a chunk of data from the source to the sink via an optional transformer. /// It takes an optional custom_shutdown_rx for shutting down the forwarder, useful for testing. pub async fn init( diff --git a/serving/source-sink/src/message.rs b/rust/monovertex/src/message.rs similarity index 100% rename from serving/source-sink/src/message.rs rename to rust/monovertex/src/message.rs diff --git a/serving/source-sink/src/metrics.rs b/rust/monovertex/src/metrics.rs similarity index 100% rename from serving/source-sink/src/metrics.rs rename to rust/monovertex/src/metrics.rs diff --git a/serving/source-sink/src/server_info.rs b/rust/monovertex/src/server_info.rs similarity index 100% rename from serving/source-sink/src/server_info.rs rename to rust/monovertex/src/server_info.rs diff --git a/serving/source-sink/src/shared.rs b/rust/monovertex/src/shared.rs similarity index 100% rename from serving/source-sink/src/shared.rs rename to rust/monovertex/src/shared.rs diff --git a/serving/source-sink/src/sink.rs b/rust/monovertex/src/sink.rs similarity index 91% rename from serving/source-sink/src/sink.rs rename to rust/monovertex/src/sink.rs index ab9a0f49c2..167524050c 100644 --- a/serving/source-sink/src/sink.rs +++ b/rust/monovertex/src/sink.rs @@ 
-1,14 +1,17 @@ -use tonic::transport::Channel; -use tonic::Request; - -use crate::error::Result; +use crate::error::{Error, Result}; use crate::message::Message; use crate::shared::connect_with_uds; +use backoff::retry::Retry; +use backoff::strategy::fixed; +use tonic::transport::Channel; +use tonic::Request; pub mod proto { tonic::include_proto!("sink.v1"); } +const RECONNECT_INTERVAL: u64 = 1000; +const MAX_RECONNECT_ATTEMPTS: usize = 5; const SINK_SOCKET: &str = "/var/run/numaflow/sink.sock"; const SINK_SERVER_INFO_FILE: &str = "/var/run/numaflow/sinker-server-info"; @@ -38,7 +41,16 @@ pub struct SinkClient { impl SinkClient { pub(crate) async fn connect(config: SinkConfig) -> Result { - let channel = connect_with_uds(config.socket_path.into()).await?; + let interval = + fixed::Interval::from_millis(RECONNECT_INTERVAL).take(MAX_RECONNECT_ATTEMPTS); + + let channel = Retry::retry( + interval, + || async { connect_with_uds(config.socket_path.clone().into()).await }, + |_: &Error| true, + ) + .await?; + let client = proto::sink_client::SinkClient::new(channel) .max_decoding_message_size(config.max_message_size) .max_encoding_message_size(config.max_message_size); @@ -65,6 +77,7 @@ impl SinkClient { .sink_fn(tokio_stream::wrappers::ReceiverStream::new(rx)) .await? 
.into_inner(); + Ok(response) } diff --git a/serving/source-sink/src/source.rs b/rust/monovertex/src/source.rs similarity index 94% rename from serving/source-sink/src/source.rs rename to rust/monovertex/src/source.rs index 3e4ec30d40..a58922d5c7 100644 --- a/serving/source-sink/src/source.rs +++ b/rust/monovertex/src/source.rs @@ -1,17 +1,19 @@ +use crate::error::{Error, Result}; +use crate::message::{Message, Offset}; +use crate::shared::connect_with_uds; +use backoff::retry::Retry; +use backoff::strategy::fixed; use base64::prelude::BASE64_STANDARD; use base64::Engine; use tokio_stream::StreamExt; use tonic::transport::Channel; use tonic::Request; -use crate::error::{Error, Result}; -use crate::message::{Message, Offset}; -use crate::shared::connect_with_uds; - pub mod proto { tonic::include_proto!("source.v1"); } - +const RECONNECT_INTERVAL: u64 = 1000; +const MAX_RECONNECT_ATTEMPTS: usize = 5; const SOURCE_SOCKET: &str = "/var/run/numaflow/source.sock"; const SOURCE_SERVER_INFO_FILE: &str = "/var/run/numaflow/sourcer-server-info"; @@ -41,7 +43,16 @@ pub(crate) struct SourceClient { impl SourceClient { pub(crate) async fn connect(config: SourceConfig) -> Result { - let channel = connect_with_uds(config.socket_path.into()).await?; + let interval = + fixed::Interval::from_millis(RECONNECT_INTERVAL).take(MAX_RECONNECT_ATTEMPTS); + + let channel = Retry::retry( + interval, + || async { connect_with_uds(config.socket_path.clone().into()).await }, + |_: &Error| true, + ) + .await?; + let client = proto::source_client::SourceClient::new(channel) .max_encoding_message_size(config.max_message_size) .max_decoding_message_size(config.max_message_size); diff --git a/serving/source-sink/src/transformer.rs b/rust/monovertex/src/transformer.rs similarity index 91% rename from serving/source-sink/src/transformer.rs rename to rust/monovertex/src/transformer.rs index 5a3f70f73f..eeabffe6fc 100644 --- a/serving/source-sink/src/transformer.rs +++ 
b/rust/monovertex/src/transformer.rs @@ -1,15 +1,18 @@ -use tonic::transport::Channel; -use tonic::Request; - -use crate::error::Result; +use crate::error::{Error, Result}; use crate::message::Message; use crate::shared::{connect_with_uds, utc_from_timestamp}; use crate::transformer::proto::SourceTransformRequest; +use backoff::retry::Retry; +use backoff::strategy::fixed; +use tonic::transport::Channel; +use tonic::Request; pub mod proto { tonic::include_proto!("sourcetransformer.v1"); } +const RECONNECT_INTERVAL: u64 = 1000; +const MAX_RECONNECT_ATTEMPTS: usize = 5; const TRANSFORMER_SOCKET: &str = "/var/run/numaflow/sourcetransform.sock"; const TRANSFORMER_SERVER_INFO_FILE: &str = "/var/run/numaflow/sourcetransformer-server-info"; @@ -39,7 +42,16 @@ pub struct TransformerClient { impl TransformerClient { pub(crate) async fn connect(config: TransformerConfig) -> Result { - let channel = connect_with_uds(config.socket_path.into()).await?; + let interval = + fixed::Interval::from_millis(RECONNECT_INTERVAL).take(MAX_RECONNECT_ATTEMPTS); + + let channel = Retry::retry( + interval, + || async { connect_with_uds(config.socket_path.clone().into()).await }, + |_: &Error| true, + ) + .await?; + let client = proto::source_transform_client::SourceTransformClient::new(channel) .max_decoding_message_size(config.max_message_size) .max_encoding_message_size(config.max_message_size); diff --git a/serving/numaflow-models/Cargo.toml b/rust/numaflow-models/Cargo.toml similarity index 100% rename from serving/numaflow-models/Cargo.toml rename to rust/numaflow-models/Cargo.toml diff --git a/serving/numaflow-models/Makefile b/rust/numaflow-models/Makefile similarity index 100% rename from serving/numaflow-models/Makefile rename to rust/numaflow-models/Makefile diff --git a/serving/numaflow-models/hack/swaggerfilter.py b/rust/numaflow-models/hack/swaggerfilter.py similarity index 100% rename from serving/numaflow-models/hack/swaggerfilter.py rename to 
rust/numaflow-models/hack/swaggerfilter.py diff --git a/serving/numaflow-models/src/apis/configuration.rs b/rust/numaflow-models/src/apis/configuration.rs similarity index 100% rename from serving/numaflow-models/src/apis/configuration.rs rename to rust/numaflow-models/src/apis/configuration.rs diff --git a/serving/numaflow-models/src/apis/mod.rs b/rust/numaflow-models/src/apis/mod.rs similarity index 100% rename from serving/numaflow-models/src/apis/mod.rs rename to rust/numaflow-models/src/apis/mod.rs diff --git a/serving/numaflow-models/src/lib.rs b/rust/numaflow-models/src/lib.rs similarity index 100% rename from serving/numaflow-models/src/lib.rs rename to rust/numaflow-models/src/lib.rs diff --git a/serving/numaflow-models/src/models/abstract_pod_template.rs b/rust/numaflow-models/src/models/abstract_pod_template.rs similarity index 100% rename from serving/numaflow-models/src/models/abstract_pod_template.rs rename to rust/numaflow-models/src/models/abstract_pod_template.rs diff --git a/serving/numaflow-models/src/models/abstract_sink.rs b/rust/numaflow-models/src/models/abstract_sink.rs similarity index 100% rename from serving/numaflow-models/src/models/abstract_sink.rs rename to rust/numaflow-models/src/models/abstract_sink.rs diff --git a/serving/numaflow-models/src/models/abstract_vertex.rs b/rust/numaflow-models/src/models/abstract_vertex.rs similarity index 100% rename from serving/numaflow-models/src/models/abstract_vertex.rs rename to rust/numaflow-models/src/models/abstract_vertex.rs diff --git a/serving/numaflow-models/src/models/authorization.rs b/rust/numaflow-models/src/models/authorization.rs similarity index 100% rename from serving/numaflow-models/src/models/authorization.rs rename to rust/numaflow-models/src/models/authorization.rs diff --git a/serving/numaflow-models/src/models/basic_auth.rs b/rust/numaflow-models/src/models/basic_auth.rs similarity index 100% rename from serving/numaflow-models/src/models/basic_auth.rs rename to 
rust/numaflow-models/src/models/basic_auth.rs diff --git a/serving/numaflow-models/src/models/blackhole.rs b/rust/numaflow-models/src/models/blackhole.rs similarity index 100% rename from serving/numaflow-models/src/models/blackhole.rs rename to rust/numaflow-models/src/models/blackhole.rs diff --git a/serving/numaflow-models/src/models/buffer_service_config.rs b/rust/numaflow-models/src/models/buffer_service_config.rs similarity index 100% rename from serving/numaflow-models/src/models/buffer_service_config.rs rename to rust/numaflow-models/src/models/buffer_service_config.rs diff --git a/serving/numaflow-models/src/models/combined_edge.rs b/rust/numaflow-models/src/models/combined_edge.rs similarity index 100% rename from serving/numaflow-models/src/models/combined_edge.rs rename to rust/numaflow-models/src/models/combined_edge.rs diff --git a/serving/numaflow-models/src/models/container.rs b/rust/numaflow-models/src/models/container.rs similarity index 100% rename from serving/numaflow-models/src/models/container.rs rename to rust/numaflow-models/src/models/container.rs diff --git a/serving/numaflow-models/src/models/container_builder.rs b/rust/numaflow-models/src/models/container_builder.rs similarity index 100% rename from serving/numaflow-models/src/models/container_builder.rs rename to rust/numaflow-models/src/models/container_builder.rs diff --git a/serving/numaflow-models/src/models/container_template.rs b/rust/numaflow-models/src/models/container_template.rs similarity index 100% rename from serving/numaflow-models/src/models/container_template.rs rename to rust/numaflow-models/src/models/container_template.rs diff --git a/serving/numaflow-models/src/models/daemon_template.rs b/rust/numaflow-models/src/models/daemon_template.rs similarity index 100% rename from serving/numaflow-models/src/models/daemon_template.rs rename to rust/numaflow-models/src/models/daemon_template.rs diff --git a/serving/numaflow-models/src/models/edge.rs 
b/rust/numaflow-models/src/models/edge.rs similarity index 100% rename from serving/numaflow-models/src/models/edge.rs rename to rust/numaflow-models/src/models/edge.rs diff --git a/serving/numaflow-models/src/models/fixed_window.rs b/rust/numaflow-models/src/models/fixed_window.rs similarity index 100% rename from serving/numaflow-models/src/models/fixed_window.rs rename to rust/numaflow-models/src/models/fixed_window.rs diff --git a/serving/numaflow-models/src/models/forward_conditions.rs b/rust/numaflow-models/src/models/forward_conditions.rs similarity index 100% rename from serving/numaflow-models/src/models/forward_conditions.rs rename to rust/numaflow-models/src/models/forward_conditions.rs diff --git a/serving/numaflow-models/src/models/function.rs b/rust/numaflow-models/src/models/function.rs similarity index 100% rename from serving/numaflow-models/src/models/function.rs rename to rust/numaflow-models/src/models/function.rs diff --git a/serving/numaflow-models/src/models/generator_source.rs b/rust/numaflow-models/src/models/generator_source.rs similarity index 100% rename from serving/numaflow-models/src/models/generator_source.rs rename to rust/numaflow-models/src/models/generator_source.rs diff --git a/serving/numaflow-models/src/models/get_container_req.rs b/rust/numaflow-models/src/models/get_container_req.rs similarity index 100% rename from serving/numaflow-models/src/models/get_container_req.rs rename to rust/numaflow-models/src/models/get_container_req.rs diff --git a/serving/numaflow-models/src/models/get_daemon_deployment_req.rs b/rust/numaflow-models/src/models/get_daemon_deployment_req.rs similarity index 100% rename from serving/numaflow-models/src/models/get_daemon_deployment_req.rs rename to rust/numaflow-models/src/models/get_daemon_deployment_req.rs diff --git a/serving/numaflow-models/src/models/get_jet_stream_service_spec_req.rs b/rust/numaflow-models/src/models/get_jet_stream_service_spec_req.rs similarity index 100% rename from 
serving/numaflow-models/src/models/get_jet_stream_service_spec_req.rs rename to rust/numaflow-models/src/models/get_jet_stream_service_spec_req.rs diff --git a/serving/numaflow-models/src/models/get_jet_stream_stateful_set_spec_req.rs b/rust/numaflow-models/src/models/get_jet_stream_stateful_set_spec_req.rs similarity index 100% rename from serving/numaflow-models/src/models/get_jet_stream_stateful_set_spec_req.rs rename to rust/numaflow-models/src/models/get_jet_stream_stateful_set_spec_req.rs diff --git a/serving/numaflow-models/src/models/get_mono_vertex_daemon_deployment_req.rs b/rust/numaflow-models/src/models/get_mono_vertex_daemon_deployment_req.rs similarity index 100% rename from serving/numaflow-models/src/models/get_mono_vertex_daemon_deployment_req.rs rename to rust/numaflow-models/src/models/get_mono_vertex_daemon_deployment_req.rs diff --git a/serving/numaflow-models/src/models/get_mono_vertex_pod_spec_req.rs b/rust/numaflow-models/src/models/get_mono_vertex_pod_spec_req.rs similarity index 100% rename from serving/numaflow-models/src/models/get_mono_vertex_pod_spec_req.rs rename to rust/numaflow-models/src/models/get_mono_vertex_pod_spec_req.rs diff --git a/serving/numaflow-models/src/models/get_redis_service_spec_req.rs b/rust/numaflow-models/src/models/get_redis_service_spec_req.rs similarity index 100% rename from serving/numaflow-models/src/models/get_redis_service_spec_req.rs rename to rust/numaflow-models/src/models/get_redis_service_spec_req.rs diff --git a/serving/numaflow-models/src/models/get_redis_stateful_set_spec_req.rs b/rust/numaflow-models/src/models/get_redis_stateful_set_spec_req.rs similarity index 100% rename from serving/numaflow-models/src/models/get_redis_stateful_set_spec_req.rs rename to rust/numaflow-models/src/models/get_redis_stateful_set_spec_req.rs diff --git a/serving/numaflow-models/src/models/get_side_input_deployment_req.rs b/rust/numaflow-models/src/models/get_side_input_deployment_req.rs similarity index 100% 
rename from serving/numaflow-models/src/models/get_side_input_deployment_req.rs rename to rust/numaflow-models/src/models/get_side_input_deployment_req.rs diff --git a/serving/numaflow-models/src/models/get_vertex_pod_spec_req.rs b/rust/numaflow-models/src/models/get_vertex_pod_spec_req.rs similarity index 100% rename from serving/numaflow-models/src/models/get_vertex_pod_spec_req.rs rename to rust/numaflow-models/src/models/get_vertex_pod_spec_req.rs diff --git a/serving/numaflow-models/src/models/group_by.rs b/rust/numaflow-models/src/models/group_by.rs similarity index 100% rename from serving/numaflow-models/src/models/group_by.rs rename to rust/numaflow-models/src/models/group_by.rs diff --git a/serving/numaflow-models/src/models/gssapi.rs b/rust/numaflow-models/src/models/gssapi.rs similarity index 100% rename from serving/numaflow-models/src/models/gssapi.rs rename to rust/numaflow-models/src/models/gssapi.rs diff --git a/serving/numaflow-models/src/models/http_source.rs b/rust/numaflow-models/src/models/http_source.rs similarity index 100% rename from serving/numaflow-models/src/models/http_source.rs rename to rust/numaflow-models/src/models/http_source.rs diff --git a/serving/numaflow-models/src/models/idle_source.rs b/rust/numaflow-models/src/models/idle_source.rs similarity index 100% rename from serving/numaflow-models/src/models/idle_source.rs rename to rust/numaflow-models/src/models/idle_source.rs diff --git a/serving/numaflow-models/src/models/inter_step_buffer_service.rs b/rust/numaflow-models/src/models/inter_step_buffer_service.rs similarity index 100% rename from serving/numaflow-models/src/models/inter_step_buffer_service.rs rename to rust/numaflow-models/src/models/inter_step_buffer_service.rs diff --git a/serving/numaflow-models/src/models/inter_step_buffer_service_list.rs b/rust/numaflow-models/src/models/inter_step_buffer_service_list.rs similarity index 100% rename from serving/numaflow-models/src/models/inter_step_buffer_service_list.rs 
rename to rust/numaflow-models/src/models/inter_step_buffer_service_list.rs diff --git a/serving/numaflow-models/src/models/inter_step_buffer_service_spec.rs b/rust/numaflow-models/src/models/inter_step_buffer_service_spec.rs similarity index 100% rename from serving/numaflow-models/src/models/inter_step_buffer_service_spec.rs rename to rust/numaflow-models/src/models/inter_step_buffer_service_spec.rs diff --git a/serving/numaflow-models/src/models/inter_step_buffer_service_status.rs b/rust/numaflow-models/src/models/inter_step_buffer_service_status.rs similarity index 100% rename from serving/numaflow-models/src/models/inter_step_buffer_service_status.rs rename to rust/numaflow-models/src/models/inter_step_buffer_service_status.rs diff --git a/serving/numaflow-models/src/models/jet_stream_buffer_service.rs b/rust/numaflow-models/src/models/jet_stream_buffer_service.rs similarity index 100% rename from serving/numaflow-models/src/models/jet_stream_buffer_service.rs rename to rust/numaflow-models/src/models/jet_stream_buffer_service.rs diff --git a/serving/numaflow-models/src/models/jet_stream_config.rs b/rust/numaflow-models/src/models/jet_stream_config.rs similarity index 100% rename from serving/numaflow-models/src/models/jet_stream_config.rs rename to rust/numaflow-models/src/models/jet_stream_config.rs diff --git a/serving/numaflow-models/src/models/jet_stream_source.rs b/rust/numaflow-models/src/models/jet_stream_source.rs similarity index 100% rename from serving/numaflow-models/src/models/jet_stream_source.rs rename to rust/numaflow-models/src/models/jet_stream_source.rs diff --git a/serving/numaflow-models/src/models/job_template.rs b/rust/numaflow-models/src/models/job_template.rs similarity index 100% rename from serving/numaflow-models/src/models/job_template.rs rename to rust/numaflow-models/src/models/job_template.rs diff --git a/serving/numaflow-models/src/models/kafka_sink.rs b/rust/numaflow-models/src/models/kafka_sink.rs similarity index 100% 
rename from serving/numaflow-models/src/models/kafka_sink.rs rename to rust/numaflow-models/src/models/kafka_sink.rs diff --git a/serving/numaflow-models/src/models/kafka_source.rs b/rust/numaflow-models/src/models/kafka_source.rs similarity index 100% rename from serving/numaflow-models/src/models/kafka_source.rs rename to rust/numaflow-models/src/models/kafka_source.rs diff --git a/serving/numaflow-models/src/models/lifecycle.rs b/rust/numaflow-models/src/models/lifecycle.rs similarity index 100% rename from serving/numaflow-models/src/models/lifecycle.rs rename to rust/numaflow-models/src/models/lifecycle.rs diff --git a/serving/numaflow-models/src/models/log.rs b/rust/numaflow-models/src/models/log.rs similarity index 100% rename from serving/numaflow-models/src/models/log.rs rename to rust/numaflow-models/src/models/log.rs diff --git a/serving/numaflow-models/src/models/metadata.rs b/rust/numaflow-models/src/models/metadata.rs similarity index 100% rename from serving/numaflow-models/src/models/metadata.rs rename to rust/numaflow-models/src/models/metadata.rs diff --git a/serving/numaflow-models/src/models/mod.rs b/rust/numaflow-models/src/models/mod.rs similarity index 100% rename from serving/numaflow-models/src/models/mod.rs rename to rust/numaflow-models/src/models/mod.rs diff --git a/serving/numaflow-models/src/models/mono_vertex.rs b/rust/numaflow-models/src/models/mono_vertex.rs similarity index 100% rename from serving/numaflow-models/src/models/mono_vertex.rs rename to rust/numaflow-models/src/models/mono_vertex.rs diff --git a/serving/numaflow-models/src/models/mono_vertex_limits.rs b/rust/numaflow-models/src/models/mono_vertex_limits.rs similarity index 100% rename from serving/numaflow-models/src/models/mono_vertex_limits.rs rename to rust/numaflow-models/src/models/mono_vertex_limits.rs diff --git a/serving/numaflow-models/src/models/mono_vertex_list.rs b/rust/numaflow-models/src/models/mono_vertex_list.rs similarity index 100% rename from 
serving/numaflow-models/src/models/mono_vertex_list.rs rename to rust/numaflow-models/src/models/mono_vertex_list.rs diff --git a/serving/numaflow-models/src/models/mono_vertex_spec.rs b/rust/numaflow-models/src/models/mono_vertex_spec.rs similarity index 100% rename from serving/numaflow-models/src/models/mono_vertex_spec.rs rename to rust/numaflow-models/src/models/mono_vertex_spec.rs diff --git a/serving/numaflow-models/src/models/mono_vertex_status.rs b/rust/numaflow-models/src/models/mono_vertex_status.rs similarity index 100% rename from serving/numaflow-models/src/models/mono_vertex_status.rs rename to rust/numaflow-models/src/models/mono_vertex_status.rs diff --git a/serving/numaflow-models/src/models/native_redis.rs b/rust/numaflow-models/src/models/native_redis.rs similarity index 100% rename from serving/numaflow-models/src/models/native_redis.rs rename to rust/numaflow-models/src/models/native_redis.rs diff --git a/serving/numaflow-models/src/models/nats_auth.rs b/rust/numaflow-models/src/models/nats_auth.rs similarity index 100% rename from serving/numaflow-models/src/models/nats_auth.rs rename to rust/numaflow-models/src/models/nats_auth.rs diff --git a/serving/numaflow-models/src/models/nats_source.rs b/rust/numaflow-models/src/models/nats_source.rs similarity index 100% rename from serving/numaflow-models/src/models/nats_source.rs rename to rust/numaflow-models/src/models/nats_source.rs diff --git a/serving/numaflow-models/src/models/no_store.rs b/rust/numaflow-models/src/models/no_store.rs similarity index 100% rename from serving/numaflow-models/src/models/no_store.rs rename to rust/numaflow-models/src/models/no_store.rs diff --git a/serving/numaflow-models/src/models/pbq_storage.rs b/rust/numaflow-models/src/models/pbq_storage.rs similarity index 100% rename from serving/numaflow-models/src/models/pbq_storage.rs rename to rust/numaflow-models/src/models/pbq_storage.rs diff --git a/serving/numaflow-models/src/models/persistence_strategy.rs 
b/rust/numaflow-models/src/models/persistence_strategy.rs similarity index 100% rename from serving/numaflow-models/src/models/persistence_strategy.rs rename to rust/numaflow-models/src/models/persistence_strategy.rs diff --git a/serving/numaflow-models/src/models/pipeline.rs b/rust/numaflow-models/src/models/pipeline.rs similarity index 100% rename from serving/numaflow-models/src/models/pipeline.rs rename to rust/numaflow-models/src/models/pipeline.rs diff --git a/serving/numaflow-models/src/models/pipeline_limits.rs b/rust/numaflow-models/src/models/pipeline_limits.rs similarity index 100% rename from serving/numaflow-models/src/models/pipeline_limits.rs rename to rust/numaflow-models/src/models/pipeline_limits.rs diff --git a/serving/numaflow-models/src/models/pipeline_list.rs b/rust/numaflow-models/src/models/pipeline_list.rs similarity index 100% rename from serving/numaflow-models/src/models/pipeline_list.rs rename to rust/numaflow-models/src/models/pipeline_list.rs diff --git a/serving/numaflow-models/src/models/pipeline_spec.rs b/rust/numaflow-models/src/models/pipeline_spec.rs similarity index 100% rename from serving/numaflow-models/src/models/pipeline_spec.rs rename to rust/numaflow-models/src/models/pipeline_spec.rs diff --git a/serving/numaflow-models/src/models/pipeline_status.rs b/rust/numaflow-models/src/models/pipeline_status.rs similarity index 100% rename from serving/numaflow-models/src/models/pipeline_status.rs rename to rust/numaflow-models/src/models/pipeline_status.rs diff --git a/serving/numaflow-models/src/models/redis_buffer_service.rs b/rust/numaflow-models/src/models/redis_buffer_service.rs similarity index 100% rename from serving/numaflow-models/src/models/redis_buffer_service.rs rename to rust/numaflow-models/src/models/redis_buffer_service.rs diff --git a/serving/numaflow-models/src/models/redis_config.rs b/rust/numaflow-models/src/models/redis_config.rs similarity index 100% rename from 
serving/numaflow-models/src/models/redis_config.rs rename to rust/numaflow-models/src/models/redis_config.rs diff --git a/serving/numaflow-models/src/models/redis_settings.rs b/rust/numaflow-models/src/models/redis_settings.rs similarity index 100% rename from serving/numaflow-models/src/models/redis_settings.rs rename to rust/numaflow-models/src/models/redis_settings.rs diff --git a/serving/numaflow-models/src/models/sasl.rs b/rust/numaflow-models/src/models/sasl.rs similarity index 100% rename from serving/numaflow-models/src/models/sasl.rs rename to rust/numaflow-models/src/models/sasl.rs diff --git a/serving/numaflow-models/src/models/sasl_plain.rs b/rust/numaflow-models/src/models/sasl_plain.rs similarity index 100% rename from serving/numaflow-models/src/models/sasl_plain.rs rename to rust/numaflow-models/src/models/sasl_plain.rs diff --git a/serving/numaflow-models/src/models/scale.rs b/rust/numaflow-models/src/models/scale.rs similarity index 100% rename from serving/numaflow-models/src/models/scale.rs rename to rust/numaflow-models/src/models/scale.rs diff --git a/serving/numaflow-models/src/models/serving_source.rs b/rust/numaflow-models/src/models/serving_source.rs similarity index 100% rename from serving/numaflow-models/src/models/serving_source.rs rename to rust/numaflow-models/src/models/serving_source.rs diff --git a/serving/numaflow-models/src/models/serving_store.rs b/rust/numaflow-models/src/models/serving_store.rs similarity index 100% rename from serving/numaflow-models/src/models/serving_store.rs rename to rust/numaflow-models/src/models/serving_store.rs diff --git a/serving/numaflow-models/src/models/session_window.rs b/rust/numaflow-models/src/models/session_window.rs similarity index 100% rename from serving/numaflow-models/src/models/session_window.rs rename to rust/numaflow-models/src/models/session_window.rs diff --git a/serving/numaflow-models/src/models/side_input.rs b/rust/numaflow-models/src/models/side_input.rs similarity index 100% 
rename from serving/numaflow-models/src/models/side_input.rs rename to rust/numaflow-models/src/models/side_input.rs diff --git a/serving/numaflow-models/src/models/side_input_trigger.rs b/rust/numaflow-models/src/models/side_input_trigger.rs similarity index 100% rename from serving/numaflow-models/src/models/side_input_trigger.rs rename to rust/numaflow-models/src/models/side_input_trigger.rs diff --git a/serving/numaflow-models/src/models/side_inputs_manager_template.rs b/rust/numaflow-models/src/models/side_inputs_manager_template.rs similarity index 100% rename from serving/numaflow-models/src/models/side_inputs_manager_template.rs rename to rust/numaflow-models/src/models/side_inputs_manager_template.rs diff --git a/serving/numaflow-models/src/models/sink.rs b/rust/numaflow-models/src/models/sink.rs similarity index 100% rename from serving/numaflow-models/src/models/sink.rs rename to rust/numaflow-models/src/models/sink.rs diff --git a/serving/numaflow-models/src/models/sliding_window.rs b/rust/numaflow-models/src/models/sliding_window.rs similarity index 100% rename from serving/numaflow-models/src/models/sliding_window.rs rename to rust/numaflow-models/src/models/sliding_window.rs diff --git a/serving/numaflow-models/src/models/source.rs b/rust/numaflow-models/src/models/source.rs similarity index 100% rename from serving/numaflow-models/src/models/source.rs rename to rust/numaflow-models/src/models/source.rs diff --git a/serving/numaflow-models/src/models/status.rs b/rust/numaflow-models/src/models/status.rs similarity index 100% rename from serving/numaflow-models/src/models/status.rs rename to rust/numaflow-models/src/models/status.rs diff --git a/serving/numaflow-models/src/models/tag_conditions.rs b/rust/numaflow-models/src/models/tag_conditions.rs similarity index 100% rename from serving/numaflow-models/src/models/tag_conditions.rs rename to rust/numaflow-models/src/models/tag_conditions.rs diff --git 
a/serving/numaflow-models/src/models/templates.rs b/rust/numaflow-models/src/models/templates.rs similarity index 100% rename from serving/numaflow-models/src/models/templates.rs rename to rust/numaflow-models/src/models/templates.rs diff --git a/serving/numaflow-models/src/models/tls.rs b/rust/numaflow-models/src/models/tls.rs similarity index 100% rename from serving/numaflow-models/src/models/tls.rs rename to rust/numaflow-models/src/models/tls.rs diff --git a/serving/numaflow-models/src/models/transformer.rs b/rust/numaflow-models/src/models/transformer.rs similarity index 100% rename from serving/numaflow-models/src/models/transformer.rs rename to rust/numaflow-models/src/models/transformer.rs diff --git a/serving/numaflow-models/src/models/ud_sink.rs b/rust/numaflow-models/src/models/ud_sink.rs similarity index 100% rename from serving/numaflow-models/src/models/ud_sink.rs rename to rust/numaflow-models/src/models/ud_sink.rs diff --git a/serving/numaflow-models/src/models/ud_source.rs b/rust/numaflow-models/src/models/ud_source.rs similarity index 100% rename from serving/numaflow-models/src/models/ud_source.rs rename to rust/numaflow-models/src/models/ud_source.rs diff --git a/serving/numaflow-models/src/models/ud_transformer.rs b/rust/numaflow-models/src/models/ud_transformer.rs similarity index 100% rename from serving/numaflow-models/src/models/ud_transformer.rs rename to rust/numaflow-models/src/models/ud_transformer.rs diff --git a/serving/numaflow-models/src/models/udf.rs b/rust/numaflow-models/src/models/udf.rs similarity index 100% rename from serving/numaflow-models/src/models/udf.rs rename to rust/numaflow-models/src/models/udf.rs diff --git a/serving/numaflow-models/src/models/vertex.rs b/rust/numaflow-models/src/models/vertex.rs similarity index 100% rename from serving/numaflow-models/src/models/vertex.rs rename to rust/numaflow-models/src/models/vertex.rs diff --git a/serving/numaflow-models/src/models/vertex_instance.rs 
b/rust/numaflow-models/src/models/vertex_instance.rs similarity index 100% rename from serving/numaflow-models/src/models/vertex_instance.rs rename to rust/numaflow-models/src/models/vertex_instance.rs diff --git a/serving/numaflow-models/src/models/vertex_limits.rs b/rust/numaflow-models/src/models/vertex_limits.rs similarity index 100% rename from serving/numaflow-models/src/models/vertex_limits.rs rename to rust/numaflow-models/src/models/vertex_limits.rs diff --git a/serving/numaflow-models/src/models/vertex_list.rs b/rust/numaflow-models/src/models/vertex_list.rs similarity index 100% rename from serving/numaflow-models/src/models/vertex_list.rs rename to rust/numaflow-models/src/models/vertex_list.rs diff --git a/serving/numaflow-models/src/models/vertex_spec.rs b/rust/numaflow-models/src/models/vertex_spec.rs similarity index 100% rename from serving/numaflow-models/src/models/vertex_spec.rs rename to rust/numaflow-models/src/models/vertex_spec.rs diff --git a/serving/numaflow-models/src/models/vertex_status.rs b/rust/numaflow-models/src/models/vertex_status.rs similarity index 100% rename from serving/numaflow-models/src/models/vertex_status.rs rename to rust/numaflow-models/src/models/vertex_status.rs diff --git a/serving/numaflow-models/src/models/vertex_template.rs b/rust/numaflow-models/src/models/vertex_template.rs similarity index 100% rename from serving/numaflow-models/src/models/vertex_template.rs rename to rust/numaflow-models/src/models/vertex_template.rs diff --git a/serving/numaflow-models/src/models/watermark.rs b/rust/numaflow-models/src/models/watermark.rs similarity index 100% rename from serving/numaflow-models/src/models/watermark.rs rename to rust/numaflow-models/src/models/watermark.rs diff --git a/serving/numaflow-models/src/models/window.rs b/rust/numaflow-models/src/models/window.rs similarity index 100% rename from serving/numaflow-models/src/models/window.rs rename to rust/numaflow-models/src/models/window.rs diff --git 
a/serving/numaflow-models/templates/Cargo.mustache b/rust/numaflow-models/templates/Cargo.mustache similarity index 100% rename from serving/numaflow-models/templates/Cargo.mustache rename to rust/numaflow-models/templates/Cargo.mustache diff --git a/serving/numaflow-models/templates/partial_header.mustache b/rust/numaflow-models/templates/partial_header.mustache similarity index 100% rename from serving/numaflow-models/templates/partial_header.mustache rename to rust/numaflow-models/templates/partial_header.mustache diff --git a/serving/servesink/.dockerignore b/rust/servesink/.dockerignore similarity index 100% rename from serving/servesink/.dockerignore rename to rust/servesink/.dockerignore diff --git a/serving/servesink/Cargo.toml b/rust/servesink/Cargo.toml similarity index 88% rename from serving/servesink/Cargo.toml rename to rust/servesink/Cargo.toml index 7534a32008..70fa8e55f5 100644 --- a/serving/servesink/Cargo.toml +++ b/rust/servesink/Cargo.toml @@ -3,10 +3,6 @@ name = "servesink" version = "0.1.0" edition = "2021" -[[bin]] -name = "servesink" -path = "src/main.rs" - [dependencies] tonic = "0.12.0" tokio = { version = "1.0", features = ["macros", "rt-multi-thread"] } diff --git a/serving/servesink/Dockerfile b/rust/servesink/Dockerfile similarity index 100% rename from serving/servesink/Dockerfile rename to rust/servesink/Dockerfile diff --git a/serving/servesink/src/main.rs b/rust/servesink/src/lib.rs similarity index 96% rename from serving/servesink/src/main.rs rename to rust/servesink/src/lib.rs index aa089f60d1..5663b61b07 100644 --- a/serving/servesink/src/main.rs +++ b/rust/servesink/src/lib.rs @@ -5,8 +5,7 @@ use reqwest::Client; use tracing::{error, warn}; use tracing_subscriber::prelude::*; -#[tokio::main] -async fn main() -> Result<(), Box> { +pub async fn servesink() -> Result<(), Box> { tracing_subscriber::registry() .with( tracing_subscriber::EnvFilter::try_from_default_env() diff --git a/serving/Cargo.toml b/rust/serving/Cargo.toml 
similarity index 86% rename from serving/Cargo.toml rename to rust/serving/Cargo.toml index 58525ad62b..635bb4f208 100644 --- a/serving/Cargo.toml +++ b/rust/serving/Cargo.toml @@ -1,6 +1,5 @@ -workspace = { members = ["backoff", "extras/upstreams", "numaflow-models", "servesink", "source-sink"] } [package] -name = "serve" +name = "serving" version = "0.1.0" edition = "2021" @@ -29,7 +28,6 @@ redis = { version = "0.26.0", features = ["tokio-comp", "aio", "connection-manag config = "0.14.0" trait-variant = "0.1.2" chrono = { version = "0.4", features = ["serde"] } -# intern -backoff = { path = "backoff" } +backoff = { path = "../backoff" } base64 = "0.22.1" diff --git a/rust/serving/README.md b/rust/serving/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/serving/config/default.toml b/rust/serving/config/default.toml similarity index 100% rename from serving/config/default.toml rename to rust/serving/config/default.toml diff --git a/serving/config/jetstream.conf b/rust/serving/config/jetstream.conf similarity index 100% rename from serving/config/jetstream.conf rename to rust/serving/config/jetstream.conf diff --git a/serving/config/pipeline_spec.json b/rust/serving/config/pipeline_spec.json similarity index 100% rename from serving/config/pipeline_spec.json rename to rust/serving/config/pipeline_spec.json diff --git a/serving/src/app.rs b/rust/serving/src/app.rs similarity index 100% rename from serving/src/app.rs rename to rust/serving/src/app.rs diff --git a/serving/src/app/callback.rs b/rust/serving/src/app/callback.rs similarity index 100% rename from serving/src/app/callback.rs rename to rust/serving/src/app/callback.rs diff --git a/serving/src/app/callback/state.rs b/rust/serving/src/app/callback/state.rs similarity index 100% rename from serving/src/app/callback/state.rs rename to rust/serving/src/app/callback/state.rs diff --git a/serving/src/app/callback/store.rs b/rust/serving/src/app/callback/store.rs similarity index 100% rename 
from serving/src/app/callback/store.rs rename to rust/serving/src/app/callback/store.rs diff --git a/serving/src/app/callback/store/memstore.rs b/rust/serving/src/app/callback/store/memstore.rs similarity index 100% rename from serving/src/app/callback/store/memstore.rs rename to rust/serving/src/app/callback/store/memstore.rs diff --git a/serving/src/app/callback/store/redisstore.rs b/rust/serving/src/app/callback/store/redisstore.rs similarity index 100% rename from serving/src/app/callback/store/redisstore.rs rename to rust/serving/src/app/callback/store/redisstore.rs diff --git a/serving/src/app/direct_proxy.rs b/rust/serving/src/app/direct_proxy.rs similarity index 100% rename from serving/src/app/direct_proxy.rs rename to rust/serving/src/app/direct_proxy.rs diff --git a/serving/src/app/jetstream_proxy.rs b/rust/serving/src/app/jetstream_proxy.rs similarity index 98% rename from serving/src/app/jetstream_proxy.rs rename to rust/serving/src/app/jetstream_proxy.rs index b243a030b6..6a56266f72 100644 --- a/serving/src/app/jetstream_proxy.rs +++ b/rust/serving/src/app/jetstream_proxy.rs @@ -117,8 +117,8 @@ async fn sync_publish_serve( } }; - // The reponse can be a binary array of elements as single chunk. For the user to process the blob, we return the array len and - // length of each element in the array. This will help the user to decomponse the binary response chunk into individual + // The response can be a binary array of elements as single chunk. For the user to process the blob, we return the array len and + // length of each element in the array. This will help the user to decompose the binary response chunk into individual // elements. 
let mut header_map = HeaderMap::new(); let response_arr_len: String = result diff --git a/serving/src/app/message_path.rs b/rust/serving/src/app/message_path.rs similarity index 100% rename from serving/src/app/message_path.rs rename to rust/serving/src/app/message_path.rs diff --git a/serving/src/app/response.rs b/rust/serving/src/app/response.rs similarity index 100% rename from serving/src/app/response.rs rename to rust/serving/src/app/response.rs diff --git a/serving/src/app/tracker.rs b/rust/serving/src/app/tracker.rs similarity index 100% rename from serving/src/app/tracker.rs rename to rust/serving/src/app/tracker.rs diff --git a/serving/src/config.rs b/rust/serving/src/config.rs similarity index 100% rename from serving/src/config.rs rename to rust/serving/src/config.rs diff --git a/serving/src/consts.rs b/rust/serving/src/consts.rs similarity index 100% rename from serving/src/consts.rs rename to rust/serving/src/consts.rs diff --git a/serving/src/error.rs b/rust/serving/src/error.rs similarity index 100% rename from serving/src/error.rs rename to rust/serving/src/error.rs diff --git a/serving/src/main.rs b/rust/serving/src/lib.rs similarity index 87% rename from serving/src/main.rs rename to rust/serving/src/lib.rs index 6e3beea02d..929a28f050 100644 --- a/serving/src/main.rs +++ b/rust/serving/src/lib.rs @@ -1,12 +1,11 @@ use tracing::{error, info}; -use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; - -pub use config::config; - -use crate::pipeline::pipeline_spec; -use crate::{app::start_main_server, metrics::start_metrics_server}; - +use tracing_subscriber::layer::SubscriberExt; +use tracing_subscriber::util::SubscriberInitExt; +use crate::app::start_main_server; +use crate::config::config; pub use self::error::{Error, Result}; +use crate::metrics::start_metrics_server; +use crate::pipeline::pipeline_spec; mod app; mod config; @@ -15,8 +14,7 @@ mod error; mod metrics; mod pipeline; -#[tokio::main] -async fn main() { +pub async fn 
serve() { tracing_subscriber::registry() .with( tracing_subscriber::EnvFilter::try_from_default_env() @@ -50,4 +48,4 @@ async fn flatten(handle: tokio::task::JoinHandle>) -> Result { Ok(Err(err)) => Err(err), Err(err) => Err(Error::Other(format!("Spawning the server: {err:?}"))), } -} \ No newline at end of file +} diff --git a/serving/src/metrics.rs b/rust/serving/src/metrics.rs similarity index 100% rename from serving/src/metrics.rs rename to rust/serving/src/metrics.rs diff --git a/serving/src/pipeline.rs b/rust/serving/src/pipeline.rs similarity index 100% rename from serving/src/pipeline.rs rename to rust/serving/src/pipeline.rs diff --git a/rust/src/bin/main.rs b/rust/src/bin/main.rs new file mode 100644 index 0000000000..ec715b0b34 --- /dev/null +++ b/rust/src/bin/main.rs @@ -0,0 +1,20 @@ +use std::env; +use tracing::{error, info}; + +#[tokio::main] +async fn main() { + let args: Vec = env::args().collect(); + + // Based on the argument, run the appropriate component. + if args.contains(&"--serving".to_string()) { + serving::serve().await; + } else if args.contains(&"--servesink".to_string()) { + if let Err(e) = servesink::servesink().await { + info!("Error running servesink: {}", e); + } + } else if args.contains(&"--monovertex".to_string()) { + monovertex::mono_vertex().await; + } else { + error!("Invalid argument. 
Use --serve, --servesink, or --monovertex."); + } +} \ No newline at end of file diff --git a/serving/extras/upstreams/Cargo.toml b/serving/extras/upstreams/Cargo.toml deleted file mode 100644 index 58340d4891..0000000000 --- a/serving/extras/upstreams/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "upstreams" -version = "0.1.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -axum = "0.7.5" -axum-macros = "0.4.1" -serde = { version = "1.0.197", features = ["derive"] } -serde_json = "1.0.114" -tokio = { version = "1.36.0", features = ["full"] } -tower-http = { version = "0.5.2", features = ["trace"] } -tracing = "0.1.40" -tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } - -[dev-dependencies] -http-body-util = "0.1.1" -tower = "0.4.13" diff --git a/serving/extras/upstreams/src/bin/simple_proxy.rs b/serving/extras/upstreams/src/bin/simple_proxy.rs deleted file mode 100644 index d09f5167a9..0000000000 --- a/serving/extras/upstreams/src/bin/simple_proxy.rs +++ /dev/null @@ -1,94 +0,0 @@ -use std::time::Duration; - -use axum::{response::IntoResponse, routing::get, Router}; -use axum_macros::debug_handler; -use tokio::{net::TcpListener, time::sleep}; -use tower_http::trace::TraceLayer; -use tracing::{debug, info}; -use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; - -#[tokio::main] -async fn main() { - tracing_subscriber::registry() - .with( - tracing_subscriber::EnvFilter::try_from_default_env() - .unwrap_or_else(|_| "simple_proxy=debug,tower_http=debug".into()), - ) - .with(tracing_subscriber::fmt::layer()) - .init(); - - let router = app(); - let router = router.layer(TraceLayer::new_for_http()); - - let listener = TcpListener::bind("localhost:8888").await.unwrap(); - info!("listening on {}", listener.local_addr().unwrap()); - axum::serve(listener, router).await.unwrap(); -} - -fn app() -> Router { - Router::new() - 
.route("/fast", get(|| async {})) - .route( - "/slow", - get(|| async { - debug!("sleeping"); - sleep(Duration::from_secs(1)).await - }), - ) - .route("/", get(root_handler)) -} - -#[debug_handler] -async fn root_handler() -> impl IntoResponse { - "ok" -} - -#[cfg(test)] -mod tests { - - // inspired from: https://github.com/tokio-rs/axum/blob/main/examples/testing/src/main.rs - - use axum::{ - body::Body, - http::{Request, StatusCode}, - }; - use http_body_util::BodyExt; - use tower::ServiceExt; - - use super::*; - - #[tokio::test] - async fn fast() { - let app = app(); - - let request = Request::builder().uri("/fast").body(Body::empty()).unwrap(); - - let response = app.oneshot(request).await.unwrap(); - - assert_eq!(response.status(), StatusCode::OK); - } - - #[tokio::test] - async fn slow() { - let app = app(); - - let request = Request::builder().uri("/slow").body(Body::empty()).unwrap(); - - let response = app.oneshot(request).await.unwrap(); - - assert_eq!(response.status(), StatusCode::OK); - } - - #[tokio::test] - async fn root() { - let app = app(); - - let request = Request::builder().uri("/").body(Body::empty()).unwrap(); - - let response = app.oneshot(request).await.unwrap(); - assert_eq!(response.status(), StatusCode::OK); - - let body = response.into_body().collect().await.unwrap().to_bytes(); - assert_eq!(&body[..], b"ok"); - } -} diff --git a/serving/source-sink/Dockerfile b/serving/source-sink/Dockerfile deleted file mode 100644 index 4ed8bb62f7..0000000000 --- a/serving/source-sink/Dockerfile +++ /dev/null @@ -1,19 +0,0 @@ -FROM rust:1.76-bookworm AS build - -RUN apt-get update -RUN apt-get install protobuf-compiler -y - -WORKDIR /source-sink -COPY ./ ./ - -# build for release -RUN cargo build --release - -# our final base -FROM debian:bookworm AS simple-source - -# copy the build artifact from the build stage -COPY --from=build /source-sink/target/release/source-sink /bin/serve - -# set the startup command to run your binary -CMD ["/bin/serve"] 
diff --git a/serving/source-sink/src/main.rs b/serving/source-sink/src/main.rs deleted file mode 100644 index e3cfb7e6a6..0000000000 --- a/serving/source-sink/src/main.rs +++ /dev/null @@ -1,89 +0,0 @@ -use tokio::signal; -use tokio::task::JoinHandle; -use tokio_util::sync::CancellationToken; -use tracing::level_filters::LevelFilter; -use tracing::{error, info}; -use tracing_subscriber::EnvFilter; - -use sourcer_sinker::config::config; -use sourcer_sinker::init; -use sourcer_sinker::sink::SinkConfig; -use sourcer_sinker::source::SourceConfig; -use sourcer_sinker::transformer::TransformerConfig; - -#[tokio::main] -async fn main() { - // Initialize the logger - tracing_subscriber::fmt() - .with_env_filter( - EnvFilter::builder() - .with_default_directive(LevelFilter::INFO.into()) - .parse_lossy(&config().log_level), - ) - .with_target(false) - .init(); - - // Initialize the source, sink and transformer configurations - // We are using the default configurations for now. - let source_config = SourceConfig { - max_message_size: config().grpc_max_message_size, - ..Default::default() - }; - - let sink_config = SinkConfig { - max_message_size: config().grpc_max_message_size, - ..Default::default() - }; - - let transformer_config = if config().is_transformer_enabled { - Some(TransformerConfig { - max_message_size: config().grpc_max_message_size, - ..Default::default() - }) - } else { - None - }; - - let cln_token = CancellationToken::new(); - let shutdown_cln_token = cln_token.clone(); - // wait for SIG{INT,TERM} and invoke cancellation token. - let shutdown_handle: JoinHandle> = tokio::spawn(async move { - shutdown_signal().await; - shutdown_cln_token.cancel(); - Ok(()) - }); - - // Run the forwarder with cancellation token. 
- if let Err(e) = init(source_config, sink_config, transformer_config, cln_token).await { - error!("Application error: {:?}", e); - - // abort the task since we have an error - if !shutdown_handle.is_finished() { - shutdown_handle.abort(); - } - } - - info!("Gracefully Exiting..."); -} - -async fn shutdown_signal() { - let ctrl_c = async { - signal::ctrl_c() - .await - .expect("failed to install Ctrl+C handler"); - info!("Received Ctrl+C signal"); - }; - - let terminate = async { - signal::unix::signal(signal::unix::SignalKind::terminate()) - .expect("failed to install signal handler") - .recv() - .await; - info!("Received terminate signal"); - }; - - tokio::select! { - _ = ctrl_c => {}, - _ = terminate => {}, - } -} From 2f15b7d2aa1c3083e5b42e43531bbb91d91d0093 Mon Sep 17 00:00:00 2001 From: mdwarne1 Date: Tue, 13 Aug 2024 11:50:15 -0400 Subject: [PATCH 17/23] Add Lockheed to Users.md (#1934) Signed-off-by: Matt Warner --- USERS.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/USERS.md b/USERS.md index 25162c20c8..ce6cdd45e5 100644 --- a/USERS.md +++ b/USERS.md @@ -5,4 +5,5 @@ Please add your company name and initial use case (optional) below. 1. [Intuit](https://www.intuit.com/) - Streaming ML inference and to prepare data for ML model training in real-time. 2. [B|Cubed](https://bcubed-corp.com/) - Digital Signal Processing Communication Systems. We receive RF energy and use numaflows to transform the RF to individual bits into intelligible data. 3. [Atlan](https://atlan.com/) - Numaflow powers real time notifications, stream processing ecosystem at Atlan. -4. [Valegachain Analytics](https://www.valegachain.com/) Numaflow is used to extract, transform, and load cryptocurrency blocks and mempool transactions in data lakes, as well as for activity alerts. \ No newline at end of file +4. 
[Valegachain Analytics](https://www.valegachain.com/) Numaflow is used to extract, transform, and load cryptocurrency blocks and mempool transactions in data lakes, as well as for activity alerts. +5. [Lockheed Martin](https://lockheedmartin.com/) Perform ELT processing on high and low volume data streams of sensor data as recieved from IOT type systems. From 4d2cc2ccb33253091a72594e1792f27d03d69a64 Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Tue, 13 Aug 2024 11:18:35 -0700 Subject: [PATCH 18/23] chore: add transformer to MonoVertex example (#1935) Signed-off-by: Vigith Maurice --- examples/21-simple-mono-vertex.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/examples/21-simple-mono-vertex.yaml b/examples/21-simple-mono-vertex.yaml index be625c41d2..98192aa8fd 100644 --- a/examples/21-simple-mono-vertex.yaml +++ b/examples/21-simple-mono-vertex.yaml @@ -7,6 +7,10 @@ spec: udsource: container: image: quay.io/numaio/numaflow-java/source-simple-source:stable + # transformer is an optional container to do any transformation to the incoming data before passing to the sink + transformer: + container: + image: quay.io/numaio/numaflow-rs/source-transformer-now:stable sink: udsink: container: From bbfe02a6c8511b1b5d4be4205498319a37fe476f Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Wed, 14 Aug 2024 00:02:06 +0530 Subject: [PATCH 19/23] fix: retry failed messages for MonoVertex sink (#1933) Signed-off-by: Yashash H L Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- rust/monovertex/src/config.rs | 6 + rust/monovertex/src/forwarder.rs | 194 +++++++++++++++++++++++++---- rust/monovertex/src/lib.rs | 10 +- rust/monovertex/src/message.rs | 7 +- rust/monovertex/src/metrics.rs | 49 +++++--- rust/monovertex/src/sink.rs | 3 +- rust/monovertex/src/transformer.rs | 3 + rust/serving/src/lib.rs | 8 +- rust/src/bin/main.rs | 2 +- 9 files changed, 234 insertions(+), 48 deletions(-) diff --git a/rust/monovertex/src/config.rs 
b/rust/monovertex/src/config.rs index 3939e16f1a..7e102e06b8 100644 --- a/rust/monovertex/src/config.rs +++ b/rust/monovertex/src/config.rs @@ -19,6 +19,8 @@ const DEFAULT_LAG_CHECK_INTERVAL_IN_SECS: u16 = 5; const DEFAULT_LAG_REFRESH_INTERVAL_IN_SECS: u16 = 3; const DEFAULT_BATCH_SIZE: u64 = 500; const DEFAULT_TIMEOUT_IN_MS: u32 = 1000; +const DEFAULT_MAX_SINK_RETRY_ATTEMPTS: u16 = 10; +const DEFAULT_SINK_RETRY_INTERVAL_IN_MS: u32 = 1; pub fn config() -> &'static Settings { static CONF: OnceLock = OnceLock::new(); @@ -41,6 +43,8 @@ pub struct Settings { pub is_transformer_enabled: bool, pub lag_check_interval_in_secs: u16, pub lag_refresh_interval_in_secs: u16, + pub sink_max_retry_attempts: u16, + pub sink_retry_interval_in_ms: u32, } impl Default for Settings { @@ -56,6 +60,8 @@ impl Default for Settings { is_transformer_enabled: false, lag_check_interval_in_secs: DEFAULT_LAG_CHECK_INTERVAL_IN_SECS, lag_refresh_interval_in_secs: DEFAULT_LAG_REFRESH_INTERVAL_IN_SECS, + sink_max_retry_attempts: DEFAULT_MAX_SINK_RETRY_ATTEMPTS, + sink_retry_interval_in_ms: DEFAULT_SINK_RETRY_INTERVAL_IN_MS, } } } diff --git a/rust/monovertex/src/forwarder.rs b/rust/monovertex/src/forwarder.rs index 002d42ee9c..014789b7b1 100644 --- a/rust/monovertex/src/forwarder.rs +++ b/rust/monovertex/src/forwarder.rs @@ -1,16 +1,19 @@ use chrono::Utc; use metrics::counter; +use std::collections::HashMap; use tokio::task::JoinSet; +use tokio::time::sleep; use tokio_util::sync::CancellationToken; -use tracing::{info, trace}; - +use tracing::info; +use tracing::log::warn; use crate::config::config; use crate::error::{Error, Result}; +use crate::message::Offset; use crate::metrics::{ FORWARDER_ACK_TOTAL, FORWARDER_READ_BYTES_TOTAL, FORWARDER_READ_TOTAL, FORWARDER_WRITE_TOTAL, MONO_VERTEX_NAME, PARTITION_LABEL, REPLICA_LABEL, VERTEX_TYPE_LABEL, }; -use crate::sink::SinkClient; +use crate::sink::{proto, SinkClient}; use crate::source::SourceClient; use crate::transformer::TransformerClient; @@ -79,8 
+82,6 @@ impl Forwarder { counter!(FORWARDER_READ_TOTAL, &self.common_labels).increment(messages_count); counter!(FORWARDER_READ_BYTES_TOTAL, &self.common_labels).increment(bytes_count); - // Extract offsets from the messages - let offsets = messages.iter().map(|message| message.offset.clone()).collect(); // Apply transformation if transformer is present let transformed_messages = if let Some(transformer_client) = &self.transformer_client { let start_time = tokio::time::Instant::now(); @@ -106,21 +107,61 @@ impl Forwarder { // TODO: should we retry writing? what if the error is transient? // we could rely on gRPC retries and say that any error that is bubbled up is worthy of non-0 exit. // we need to confirm this via FMEA tests. - let start_time = tokio::time::Instant::now(); - self.sink_client.sink_fn(transformed_messages).await?; - info!("Sink latency - {}ms", start_time.elapsed().as_millis()); - counter!(FORWARDER_WRITE_TOTAL, &self.common_labels).increment(messages_count); + let mut retry_messages = transformed_messages; + let mut attempts = 0; + let mut error_map = HashMap::new(); - // Acknowledge the messages - // TODO: should we retry acking? what if the error is transient? - // we could rely on gRPC retries and say that any error that is bubbled up is worthy of non-0 exit. - // we need to confirm this via FMEA tests. 
- let start_time = tokio::time::Instant::now(); - self.source_client.ack_fn(offsets).await?; - info!("Ack latency - {}ms", start_time.elapsed().as_millis()); + while attempts <= config().sink_max_retry_attempts { + let start_time = tokio::time::Instant::now(); + match self.sink_client.sink_fn(retry_messages.clone()).await { + Ok(response) => { + info!("Sink latency - {}ms", start_time.elapsed().as_millis()); + + let failed_ids: Vec = response.results.iter() + .filter(|result| result.status != proto::Status::Success as i32) + .map(|result| result.id.clone()) + .collect(); + + let successful_offsets: Vec = retry_messages.iter() + .filter(|msg| !failed_ids.contains(&msg.id)) + .map(|msg| msg.offset.clone()) + .collect(); + + + // ack the successful offsets + let n = successful_offsets.len(); + self.source_client.ack_fn(successful_offsets).await?; + counter!(FORWARDER_WRITE_TOTAL, &self.common_labels).increment(n as u64); + attempts += 1; + + if failed_ids.is_empty() { + break; + } else { + // Collect error messages and their counts + retry_messages.retain(|msg| failed_ids.contains(&msg.id)); + error_map.clear(); + for result in response.results { + if result.status != proto::Status::Success as i32 { + *error_map.entry(result.err_msg).or_insert(0) += 1; + } + } + + warn!("Retry attempt {} due to retryable error. Errors: {:?}", attempts, error_map); + sleep(tokio::time::Duration::from_millis(config().sink_retry_interval_in_ms as u64)).await; + } + } + Err(e) => return Err(e), + } + } + + if !error_map.is_empty() { + return Err(Error::SinkError(format!( + "Failed to sink messages after {} attempts. 
Errors: {:?}", + attempts, error_map + ))); + } counter!(FORWARDER_ACK_TOTAL, &self.common_labels).increment(messages_count); - trace!("Forwarded {} messages", messages_count); } } // if the last forward was more than 1 second ago, forward a chunk print the number of messages forwarded @@ -142,17 +183,17 @@ impl Forwarder { mod tests { use std::collections::HashSet; + use crate::error::Error; + use crate::forwarder::Forwarder; + use crate::sink::{SinkClient, SinkConfig}; + use crate::source::{SourceClient, SourceConfig}; + use crate::transformer::{TransformerClient, TransformerConfig}; use chrono::Utc; use numaflow::source::{Message, Offset, SourceReadRequest}; use numaflow::{sink, source, sourcetransform}; use tokio::sync::mpsc::Sender; use tokio_util::sync::CancellationToken; - use crate::forwarder::Forwarder; - use crate::sink::{SinkClient, SinkConfig}; - use crate::source::{SourceClient, SourceConfig}; - use crate::transformer::{TransformerClient, TransformerConfig}; - struct SimpleSource { yet_to_be_acked: std::sync::RwLock>, } @@ -412,4 +453,113 @@ mod tests { .await .expect("failed to join sink server task"); } + + struct ErrorSink {} + + #[tonic::async_trait] + impl sink::Sinker for ErrorSink { + async fn sink( + &self, + mut input: tokio::sync::mpsc::Receiver, + ) -> Vec { + let mut responses = vec![]; + while let Some(datum) = input.recv().await { + responses.append(&mut vec![sink::Response::failure( + datum.id, + "error".to_string(), + )]); + } + responses + } + } + + #[tokio::test] + async fn test_forwarder_sink_error() { + // Start the source server + let (source_shutdown_tx, source_shutdown_rx) = tokio::sync::oneshot::channel(); + let tmp_dir = tempfile::TempDir::new().unwrap(); + let source_sock_file = tmp_dir.path().join("source.sock"); + let server_info_file = tmp_dir.path().join("source-server-info"); + + let server_info = server_info_file.clone(); + let source_socket = source_sock_file.clone(); + let source_server_handle = tokio::spawn(async move 
{ + source::Server::new(SimpleSource::new()) + .with_socket_file(source_socket) + .with_server_info_file(server_info) + .start_with_shutdown(source_shutdown_rx) + .await + .unwrap(); + }); + let source_config = SourceConfig { + socket_path: source_sock_file.to_str().unwrap().to_string(), + server_info_file: server_info_file.to_str().unwrap().to_string(), + max_message_size: 4 * 1024 * 1024, + }; + + // Start the sink server + let (sink_shutdown_tx, sink_shutdown_rx) = tokio::sync::oneshot::channel(); + let sink_tmp_dir = tempfile::TempDir::new().unwrap(); + let sink_sock_file = sink_tmp_dir.path().join("sink.sock"); + let server_info_file = sink_tmp_dir.path().join("sink-server-info"); + + let server_info = server_info_file.clone(); + let sink_socket = sink_sock_file.clone(); + let sink_server_handle = tokio::spawn(async move { + sink::Server::new(ErrorSink {}) + .with_socket_file(sink_socket) + .with_server_info_file(server_info) + .start_with_shutdown(sink_shutdown_rx) + .await + .unwrap(); + }); + let sink_config = SinkConfig { + socket_path: sink_sock_file.to_str().unwrap().to_string(), + server_info_file: server_info_file.to_str().unwrap().to_string(), + max_message_size: 4 * 1024 * 1024, + }; + + // Wait for the servers to start + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + + let cln_token = CancellationToken::new(); + + let source_client = SourceClient::connect(source_config) + .await + .expect("failed to connect to source server"); + + let sink_client = SinkClient::connect(sink_config) + .await + .expect("failed to connect to sink server"); + + let mut forwarder = Forwarder::new(source_client, sink_client, None, cln_token.clone()) + .await + .expect("failed to create forwarder"); + + let forwarder_handle = tokio::spawn(async move { + forwarder.run().await?; + Ok(()) + }); + + // Set a timeout for the forwarder + let timeout_duration = tokio::time::Duration::from_secs(1); + let result = tokio::time::timeout(timeout_duration, 
forwarder_handle).await; + let result: Result<(), Error> = result.expect("forwarder_handle timed out").unwrap(); + assert!(result.is_err()); + + // stop the servers + source_shutdown_tx + .send(()) + .expect("failed to send shutdown signal"); + source_server_handle + .await + .expect("failed to join source server task"); + + sink_shutdown_tx + .send(()) + .expect("failed to send shutdown signal"); + sink_server_handle + .await + .expect("failed to join sink server task"); + } } diff --git a/rust/monovertex/src/lib.rs b/rust/monovertex/src/lib.rs index 88f7bb9c19..c4d6bf21bf 100644 --- a/rust/monovertex/src/lib.rs +++ b/rust/monovertex/src/lib.rs @@ -2,7 +2,7 @@ pub(crate) use self::error::Result; use crate::config::config; pub(crate) use crate::error::Error; use crate::forwarder::Forwarder; -use crate::metrics::{start_metrics_https_server, MetricsState}; +use crate::metrics::{start_metrics_https_server, MetricsState, LagReaderBuilder}; use crate::sink::{SinkClient, SinkConfig}; use crate::source::{SourceClient, SourceConfig}; use crate::transformer::{TransformerClient, TransformerConfig}; @@ -75,7 +75,7 @@ pub async fn mono_vertex() { let cln_token = CancellationToken::new(); let shutdown_cln_token = cln_token.clone(); // wait for SIG{INT,TERM} and invoke cancellation token. 
- let shutdown_handle: JoinHandle> = tokio::spawn(async move { + let shutdown_handle: JoinHandle> = tokio::spawn(async move { shutdown_signal().await; shutdown_cln_token.cancel(); Ok(()) @@ -181,10 +181,12 @@ pub async fn init( }); // start the lag reader to publish lag metrics - let mut lag_reader = metrics::LagReader::new(source_client.clone(), None, None); + let mut lag_reader = LagReaderBuilder::new(source_client.clone()) + .lag_checking_interval(Duration::from_secs(config().lag_check_interval_in_secs.into())) + .refresh_interval(Duration::from_secs(config().lag_refresh_interval_in_secs.into())) + .build(); lag_reader.start().await; - // TODO: use builder pattern of options like TIMEOUT, BATCH_SIZE, etc? let mut forwarder = Forwarder::new(source_client, sink_client, transformer_client, cln_token).await?; diff --git a/rust/monovertex/src/message.rs b/rust/monovertex/src/message.rs index 1ca69e9878..6df0874948 100644 --- a/rust/monovertex/src/message.rs +++ b/rust/monovertex/src/message.rs @@ -21,6 +21,8 @@ pub(crate) struct Message { pub(crate) offset: Offset, /// event time of the message pub(crate) event_time: DateTime, + /// id of the message + pub(crate) id: String, /// headers of the message pub(crate) headers: HashMap, } @@ -63,8 +65,9 @@ impl TryFrom for Message { Ok(Message { keys: result.keys, value: result.payload, - offset: source_offset, + offset: source_offset.clone(), event_time: utc_from_timestamp(result.event_time), + id: format!("{}-{}", source_offset.partition_id, source_offset.offset), headers: result.headers, }) } @@ -78,7 +81,7 @@ impl From for proto::SinkRequest { value: message.value, event_time: prost_timestamp_from_utc(message.event_time), watermark: None, - id: format!("{}-{}", message.offset.partition_id, message.offset.offset), + id: message.id, headers: message.headers, } } diff --git a/rust/monovertex/src/metrics.rs b/rust/monovertex/src/metrics.rs index 896c7a768d..69782e2c32 100644 --- a/rust/monovertex/src/metrics.rs +++ 
b/rust/monovertex/src/metrics.rs @@ -107,14 +107,12 @@ pub(crate) async fn start_metrics_https_server( /// router for metrics and k8s health endpoints fn metrics_router(recorder_handle: PrometheusHandle, metrics_state: MetricsState) -> Router { - let metrics_app = Router::new() + Router::new() .route("/metrics", get(move || ready(recorder_handle.render()))) .route("/livez", get(livez)) .route("/readyz", get(readyz)) .route("/sidecar-livez", get(sidecar_livez)) - .with_state(metrics_state); - - metrics_app + .with_state(metrics_state) } async fn livez() -> impl IntoResponse { @@ -199,23 +197,46 @@ pub(crate) struct LagReader { pending_stats: Arc>>, } -impl LagReader { - /// Creates a new `LagReader` instance. - pub(crate) fn new( - source_client: SourceClient, - lag_checking_interval: Option, - refresh_interval: Option, - ) -> Self { + +/// LagReaderBuilder is used to build a `LagReader` instance. +pub(crate) struct LagReaderBuilder { + source_client: SourceClient, + lag_checking_interval: Option, + refresh_interval: Option, +} + +impl LagReaderBuilder { + pub(crate) fn new(source_client: SourceClient) -> Self { Self { source_client, - lag_checking_interval: lag_checking_interval.unwrap_or_else(|| Duration::from_secs(3)), - refresh_interval: refresh_interval.unwrap_or_else(|| Duration::from_secs(5)), + lag_checking_interval: None, + refresh_interval: None, + } + } + + pub(crate) fn lag_checking_interval(mut self, interval: Duration) -> Self { + self.lag_checking_interval = Some(interval); + self + } + + pub(crate) fn refresh_interval(mut self, interval: Duration) -> Self { + self.refresh_interval = Some(interval); + self + } + + pub(crate) fn build(self) -> LagReader { + LagReader { + source_client: self.source_client, + lag_checking_interval: self.lag_checking_interval.unwrap_or_else(|| Duration::from_secs(3)), + refresh_interval: self.refresh_interval.unwrap_or_else(|| Duration::from_secs(5)), buildup_handle: None, expose_handle: None, pending_stats: 
Arc::new(Mutex::new(Vec::with_capacity(MAX_PENDING_STATS))), } } +} +impl LagReader { /// Starts the lag reader by spawning tasks to build up pending info and expose pending metrics. /// /// This method spawns two asynchronous tasks: @@ -289,7 +310,7 @@ async fn expose_pending_metrics( pending_stats: Arc>>, ) { let mut ticker = time::interval(refresh_interval); - let lookback_seconds_map = vec![("1m", 60), ("5m", 300), ("15m", 900)]; + let lookback_seconds_map = vec![("1m", 60), ("default", 120), ("5m", 300), ("15m", 900)]; loop { ticker.tick().await; for (label, seconds) in &lookback_seconds_map { diff --git a/rust/monovertex/src/sink.rs b/rust/monovertex/src/sink.rs index 167524050c..5f2b8a74be 100644 --- a/rust/monovertex/src/sink.rs +++ b/rust/monovertex/src/sink.rs @@ -71,7 +71,6 @@ impl SinkClient { } }); - // TODO: retry for response with failure status let response = self .client .sink_fn(tokio_stream::wrappers::ReceiverStream::new(rx)) @@ -159,6 +158,7 @@ mod tests { }, event_time: Utc::now(), headers: Default::default(), + id: "one".to_string(), }, Message { keys: vec![], @@ -169,6 +169,7 @@ mod tests { }, event_time: Utc::now(), headers: Default::default(), + id: "two".to_string(), }, ]; diff --git a/rust/monovertex/src/transformer.rs b/rust/monovertex/src/transformer.rs index eeabffe6fc..e4e68b6352 100644 --- a/rust/monovertex/src/transformer.rs +++ b/rust/monovertex/src/transformer.rs @@ -61,6 +61,7 @@ impl TransformerClient { pub(crate) async fn transform_fn(&mut self, message: Message) -> Result> { // fields which will not be changed let offset = message.offset.clone(); + let id = message.id.clone(); let headers = message.headers.clone(); // TODO: is this complex? 
the reason to do this is, tomorrow when we have the normal @@ -77,6 +78,7 @@ impl TransformerClient { keys: result.keys, value: result.value, offset: offset.clone(), + id: id.clone(), event_time: utc_from_timestamp(result.event_time), headers: headers.clone(), }; @@ -151,6 +153,7 @@ mod tests { offset: "0".into(), }, event_time: chrono::Utc::now(), + id: "".to_string(), headers: Default::default(), }; diff --git a/rust/serving/src/lib.rs b/rust/serving/src/lib.rs index 929a28f050..4a86d0a0bf 100644 --- a/rust/serving/src/lib.rs +++ b/rust/serving/src/lib.rs @@ -1,11 +1,11 @@ -use tracing::{error, info}; -use tracing_subscriber::layer::SubscriberExt; -use tracing_subscriber::util::SubscriberInitExt; +pub use self::error::{Error, Result}; use crate::app::start_main_server; use crate::config::config; -pub use self::error::{Error, Result}; use crate::metrics::start_metrics_server; use crate::pipeline::pipeline_spec; +use tracing::{error, info}; +use tracing_subscriber::layer::SubscriberExt; +use tracing_subscriber::util::SubscriberInitExt; mod app; mod config; diff --git a/rust/src/bin/main.rs b/rust/src/bin/main.rs index ec715b0b34..8c29f33f73 100644 --- a/rust/src/bin/main.rs +++ b/rust/src/bin/main.rs @@ -17,4 +17,4 @@ async fn main() { } else { error!("Invalid argument. 
Use --serve, --servesink, or --monovertex."); } -} \ No newline at end of file +} From e9c5fa2c8076374fd1162b7eb0aaf164b8d06e6e Mon Sep 17 00:00:00 2001 From: Keran Yang Date: Tue, 13 Aug 2024 18:43:27 -0400 Subject: [PATCH 20/23] chore: implement APIs for mono vertex UI server (#1931) Signed-off-by: Keran Yang Signed-off-by: Yashash H L Co-authored-by: Yashash H L --- pkg/apis/numaflow/v1alpha1/const.go | 4 + pkg/reconciler/metrics.go | 6 +- pkg/reconciler/pipeline/controller.go | 4 +- pkg/reconciler/vertex/controller.go | 2 +- server/apis/interface.go | 4 + server/apis/v1/handler.go | 193 +++++++++++++++++---- server/apis/v1/response_cluster_summary.go | 21 ++- server/apis/v1/response_mono_vertex.go | 39 +++++ server/apis/v1/response_pipeline.go | 2 +- server/authz/consts.go | 11 +- server/cmd/server/start.go | 4 + server/cmd/server/start_test.go | 4 +- server/routes/routes.go | 12 +- test/api-e2e/api_test.go | 21 ++- test/api-e2e/testdata.go | 30 ++++ 15 files changed, 306 insertions(+), 51 deletions(-) create mode 100644 server/apis/v1/response_mono_vertex.go diff --git a/pkg/apis/numaflow/v1alpha1/const.go b/pkg/apis/numaflow/v1alpha1/const.go index 19a3274cef..a1a8e518fa 100644 --- a/pkg/apis/numaflow/v1alpha1/const.go +++ b/pkg/apis/numaflow/v1alpha1/const.go @@ -228,6 +228,10 @@ const ( PipelineStatusDeleting = "deleting" PipelineStatusUnhealthy = "unhealthy" + // MonoVertex health status + // TODO - more statuses to be added + MonoVertexStatusHealthy = "healthy" + // Callback annotation keys CallbackEnabledKey = "numaflow.numaproj.io/callback" CallbackURLKey = "numaflow.numaproj.io/callback-url" diff --git a/pkg/reconciler/metrics.go b/pkg/reconciler/metrics.go index 37d4cc5148..5f92049f2d 100644 --- a/pkg/reconciler/metrics.go +++ b/pkg/reconciler/metrics.go @@ -59,8 +59,8 @@ var ( Help: "A metric indicates the replicas of a Redis ISB Service", }, []string{metrics.LabelNamespace, metrics.LabelISBService}) - // VertexDisiredReplicas indicates the desired 
replicas of a Vertex. - VertexDisiredReplicas = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + // VertexDesiredReplicas indicates the desired replicas of a Vertex. + VertexDesiredReplicas = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Subsystem: "controller", Name: "vertex_desired_replicas", Help: "A metric indicates the desired replicas of a Vertex", @@ -75,5 +75,5 @@ var ( ) func init() { - ctrlmetrics.Registry.MustRegister(BuildInfo, ISBSvcHealth, PipelineHealth, JetStreamISBSvcReplicas, RedisISBSvcReplicas, VertexDisiredReplicas, VertexCurrentReplicas) + ctrlmetrics.Registry.MustRegister(BuildInfo, ISBSvcHealth, PipelineHealth, JetStreamISBSvcReplicas, RedisISBSvcReplicas, VertexDesiredReplicas, VertexCurrentReplicas) } diff --git a/pkg/reconciler/pipeline/controller.go b/pkg/reconciler/pipeline/controller.go index c9d824e941..0be76a8d0b 100644 --- a/pkg/reconciler/pipeline/controller.go +++ b/pkg/reconciler/pipeline/controller.go @@ -129,7 +129,7 @@ func (r *pipelineReconciler) reconcile(ctx context.Context, pl *dfv1.Pipeline) ( // Clean up metrics _ = reconciler.PipelineHealth.DeleteLabelValues(pl.Namespace, pl.Name) // Delete corresponding vertex metrics - _ = reconciler.VertexDisiredReplicas.DeletePartialMatch(map[string]string{metrics.LabelNamespace: pl.Namespace, metrics.LabelPipeline: pl.Name}) + _ = reconciler.VertexDesiredReplicas.DeletePartialMatch(map[string]string{metrics.LabelNamespace: pl.Namespace, metrics.LabelPipeline: pl.Name}) _ = reconciler.VertexCurrentReplicas.DeletePartialMatch(map[string]string{metrics.LabelNamespace: pl.Namespace, metrics.LabelPipeline: pl.Name}) } return ctrl.Result{}, nil @@ -292,7 +292,7 @@ func (r *pipelineReconciler) reconcileNonLifecycleChanges(ctx context.Context, p log.Infow("Deleted stale vertex successfully", zap.String("vertex", v.Name)) r.recorder.Eventf(pl, corev1.EventTypeNormal, "DeleteStaleVertexSuccess", "Deleted stale vertex %s successfully", v.Name) // Clean up vertex replica metrics - 
reconciler.VertexDisiredReplicas.DeleteLabelValues(pl.Namespace, pl.Name, v.Spec.Name) + reconciler.VertexDesiredReplicas.DeleteLabelValues(pl.Namespace, pl.Name, v.Spec.Name) reconciler.VertexCurrentReplicas.DeleteLabelValues(pl.Namespace, pl.Name, v.Spec.Name) } diff --git a/pkg/reconciler/vertex/controller.go b/pkg/reconciler/vertex/controller.go index b88b3b94f8..f9e82436d1 100644 --- a/pkg/reconciler/vertex/controller.go +++ b/pkg/reconciler/vertex/controller.go @@ -126,7 +126,7 @@ func (r *vertexReconciler) reconcile(ctx context.Context, vertex *dfv1.Vertex) ( desiredReplicas := vertex.GetReplicas() // Set metrics defer func() { - reconciler.VertexDisiredReplicas.WithLabelValues(vertex.Namespace, vertex.Spec.PipelineName, vertex.Spec.Name).Set(float64(desiredReplicas)) + reconciler.VertexDesiredReplicas.WithLabelValues(vertex.Namespace, vertex.Spec.PipelineName, vertex.Spec.Name).Set(float64(desiredReplicas)) reconciler.VertexCurrentReplicas.WithLabelValues(vertex.Namespace, vertex.Spec.PipelineName, vertex.Spec.Name).Set(float64(vertex.Status.Replicas)) }() diff --git a/server/apis/interface.go b/server/apis/interface.go index b618063637..ff41eb3ebe 100644 --- a/server/apis/interface.go +++ b/server/apis/interface.go @@ -42,4 +42,8 @@ type Handler interface { PodLogs(c *gin.Context) GetNamespaceEvents(c *gin.Context) GetPipelineStatus(c *gin.Context) + ListMonoVertices(c *gin.Context) + GetMonoVertex(c *gin.Context) + ListMonoVertexPods(c *gin.Context) + CreateMonoVertex(c *gin.Context) } diff --git a/server/apis/v1/handler.go b/server/apis/v1/handler.go index 12fe7403b1..59dc00c810 100644 --- a/server/apis/v1/handler.go +++ b/server/apis/v1/handler.go @@ -241,13 +241,14 @@ func (h *handler) GetClusterSummary(c *gin.Context) { } type namespaceSummary struct { - pipelineSummary PipelineSummary - isbsvcSummary IsbServiceSummary + pipelineSummary PipelineSummary + isbsvcSummary IsbServiceSummary + monoVertexSummary MonoVertexSummary } var namespaceSummaryMap = 
make(map[string]namespaceSummary) // get pipeline summary - pipelineList, err := h.numaflowClient.Pipelines("").List(context.Background(), metav1.ListOptions{}) + pipelineList, err := h.numaflowClient.Pipelines("").List(c, metav1.ListOptions{}) if err != nil { h.respondWithError(c, fmt.Sprintf("Failed to fetch cluster summary, %s", err.Error())) return @@ -271,7 +272,7 @@ func (h *handler) GetClusterSummary(c *gin.Context) { } // get isbsvc summary - isbsvcList, err := h.numaflowClient.InterStepBufferServices("").List(context.Background(), metav1.ListOptions{}) + isbsvcList, err := h.numaflowClient.InterStepBufferServices("").List(c, metav1.ListOptions{}) if err != nil { h.respondWithError(c, fmt.Sprintf("Failed to fetch cluster summary, %s", err.Error())) return @@ -294,19 +295,45 @@ func (h *handler) GetClusterSummary(c *gin.Context) { namespaceSummaryMap[isbsvc.Namespace] = summary } + // get mono vertex summary + mvtList, err := h.numaflowClient.MonoVertices("").List(c, metav1.ListOptions{}) + if err != nil { + h.respondWithError(c, fmt.Sprintf("Failed to fetch cluster summary, failed to fetch mono vertex list, %s", err.Error())) + return + } + for _, monoVertex := range mvtList.Items { + var summary namespaceSummary + if value, ok := namespaceSummaryMap[monoVertex.Namespace]; ok { + summary = value + } + status, err := getMonoVertexStatus(&monoVertex) + if err != nil { + h.respondWithError(c, fmt.Sprintf("Failed to fetch cluster summary, failed to get the status of the mono vertex %s, %s", monoVertex.Name, err.Error())) + return + } + // if the mono vertex is healthy, increment the active count, otherwise increment the inactive count + // TODO - add more status types for mono vertex and update the logic here + if status == dfv1.MonoVertexStatusHealthy { + summary.monoVertexSummary.Active.increment(status) + } else { + summary.monoVertexSummary.Inactive++ + } + namespaceSummaryMap[monoVertex.Namespace] = summary + } + // get cluster summary var clusterSummary 
ClusterSummaryResponse // at this moment, if a namespace has neither pipeline nor isbsvc, it will not be included in the namespacedSummaryMap. // since we still want to pass these empty namespaces to the frontend, we add them here. for _, ns := range namespaces { if _, ok := namespaceSummaryMap[ns]; !ok { - // if the namespace is not in the namespaceSummaryMap, it means it has neither pipeline nor isbsvc + // if the namespace is not in the namespaceSummaryMap, it means it has none of the pipelines, isbsvc, or mono vertex // taking advantage of golang by default initializing the struct with zero value namespaceSummaryMap[ns] = namespaceSummary{} } } for name, summary := range namespaceSummaryMap { - clusterSummary = append(clusterSummary, NewNamespaceSummary(name, summary.pipelineSummary, summary.isbsvcSummary)) + clusterSummary = append(clusterSummary, NewNamespaceSummary(name, summary.pipelineSummary, summary.isbsvcSummary, summary.monoVertexSummary)) } // sort the cluster summary by namespace in alphabetical order, @@ -351,7 +378,7 @@ func (h *handler) CreatePipeline(c *gin.Context) { c.JSON(http.StatusOK, NewNumaflowAPIResponse(nil, nil)) return } - if _, err := h.numaflowClient.Pipelines(ns).Create(context.Background(), &pipelineSpec, metav1.CreateOptions{}); err != nil { + if _, err := h.numaflowClient.Pipelines(ns).Create(c, &pipelineSpec, metav1.CreateOptions{}); err != nil { h.respondWithError(c, fmt.Sprintf("Failed to create pipeline %q, %s", pipelineSpec.Name, err.Error())) return } @@ -377,7 +404,7 @@ func (h *handler) GetPipeline(c *gin.Context) { ns, pipeline := c.Param("namespace"), c.Param("pipeline") // get general pipeline info - pl, err := h.numaflowClient.Pipelines(ns).Get(context.Background(), pipeline, metav1.GetOptions{}) + pl, err := h.numaflowClient.Pipelines(ns).Get(c, pipeline, metav1.GetOptions{}) if err != nil { h.respondWithError(c, fmt.Sprintf("Failed to fetch pipeline %q namespace %q, %s", pipeline, ns, err.Error())) return @@ -417,7 
+444,7 @@ func (h *handler) GetPipeline(c *gin.Context) { minWM int64 = math.MaxInt64 maxWM int64 = math.MinInt64 ) - watermarks, err := client.GetPipelineWatermarks(context.Background(), pipeline) + watermarks, err := client.GetPipelineWatermarks(c, pipeline) if err != nil { h.respondWithError(c, fmt.Sprintf("Failed to fetch pipeline: failed to calculate lag for pipeline %q: %s", pipeline, err.Error())) return @@ -464,7 +491,7 @@ func (h *handler) UpdatePipeline(c *gin.Context) { // dryRun is used to check if the operation is just a validation or an actual update dryRun := strings.EqualFold("true", c.DefaultQuery("dry-run", "false")) - oldSpec, err := h.numaflowClient.Pipelines(ns).Get(context.Background(), pipeline, metav1.GetOptions{}) + oldSpec, err := h.numaflowClient.Pipelines(ns).Get(c, pipeline, metav1.GetOptions{}) if err != nil { h.respondWithError(c, fmt.Sprintf("Failed to fetch pipeline %q namespace %q, %s", pipeline, ns, err.Error())) return @@ -500,7 +527,7 @@ func (h *handler) UpdatePipeline(c *gin.Context) { } oldSpec.Spec = updatedSpec.Spec - if _, err := h.numaflowClient.Pipelines(ns).Update(context.Background(), oldSpec, metav1.UpdateOptions{}); err != nil { + if _, err := h.numaflowClient.Pipelines(ns).Update(c, oldSpec, metav1.UpdateOptions{}); err != nil { h.respondWithError(c, fmt.Sprintf("Failed to update pipeline %q, %s", pipeline, err.Error())) return } @@ -518,7 +545,7 @@ func (h *handler) DeletePipeline(c *gin.Context) { ns, pipeline := c.Param("namespace"), c.Param("pipeline") - if err := h.numaflowClient.Pipelines(ns).Delete(context.Background(), pipeline, metav1.DeleteOptions{}); err != nil { + if err := h.numaflowClient.Pipelines(ns).Delete(c, pipeline, metav1.DeleteOptions{}); err != nil { h.respondWithError(c, fmt.Sprintf("Failed to delete pipeline %q, %s", pipeline, err.Error())) return } @@ -551,7 +578,7 @@ func (h *handler) PatchPipeline(c *gin.Context) { return } - if _, err := 
h.numaflowClient.Pipelines(ns).Patch(context.Background(), pipeline, types.MergePatchType, patchSpec, metav1.PatchOptions{}); err != nil { + if _, err := h.numaflowClient.Pipelines(ns).Patch(c, pipeline, types.MergePatchType, patchSpec, metav1.PatchOptions{}); err != nil { h.respondWithError(c, fmt.Sprintf("Failed to patch pipeline %q, %s", pipeline, err.Error())) return } @@ -593,7 +620,7 @@ func (h *handler) CreateInterStepBufferService(c *gin.Context) { return } - if _, err := h.numaflowClient.InterStepBufferServices(ns).Create(context.Background(), &isbsvcSpec, metav1.CreateOptions{}); err != nil { + if _, err := h.numaflowClient.InterStepBufferServices(ns).Create(c, &isbsvcSpec, metav1.CreateOptions{}); err != nil { h.respondWithError(c, fmt.Sprintf("Failed to create interstepbuffer service %q, %s", isbsvcSpec.Name, err.Error())) return } @@ -616,7 +643,7 @@ func (h *handler) ListInterStepBufferServices(c *gin.Context) { func (h *handler) GetInterStepBufferService(c *gin.Context) { ns, isbsvcName := c.Param("namespace"), c.Param("isb-service") - isbsvc, err := h.numaflowClient.InterStepBufferServices(ns).Get(context.Background(), isbsvcName, metav1.GetOptions{}) + isbsvc, err := h.numaflowClient.InterStepBufferServices(ns).Get(c, isbsvcName, metav1.GetOptions{}) if err != nil { h.respondWithError(c, fmt.Sprintf("Failed to fetch interstepbuffer service %q namespace %q, %s", isbsvcName, ns, err.Error())) return @@ -645,7 +672,7 @@ func (h *handler) UpdateInterStepBufferService(c *gin.Context) { // dryRun is used to check if the operation is just a validation or an actual update dryRun := strings.EqualFold("true", c.DefaultQuery("dry-run", "false")) - isbSVC, err := h.numaflowClient.InterStepBufferServices(ns).Get(context.Background(), isbsvcName, metav1.GetOptions{}) + isbSVC, err := h.numaflowClient.InterStepBufferServices(ns).Get(c, isbsvcName, metav1.GetOptions{}) if err != nil { h.respondWithError(c, fmt.Sprintf("Failed to get the interstep buffer service: 
namespace %q isb-services %q: %s", ns, isbsvcName, err.Error())) return @@ -668,7 +695,7 @@ func (h *handler) UpdateInterStepBufferService(c *gin.Context) { return } isbSVC.Spec = updatedSpec.Spec - updatedISBSvc, err := h.numaflowClient.InterStepBufferServices(ns).Update(context.Background(), isbSVC, metav1.UpdateOptions{}) + updatedISBSvc, err := h.numaflowClient.InterStepBufferServices(ns).Update(c, isbSVC, metav1.UpdateOptions{}) if err != nil { h.respondWithError(c, fmt.Sprintf("Failed to update the interstep buffer service: namespace %q isb-services %q: %s", ns, isbsvcName, err.Error())) return @@ -685,7 +712,7 @@ func (h *handler) DeleteInterStepBufferService(c *gin.Context) { ns, isbsvcName := c.Param("namespace"), c.Param("isb-service") - pipelines, err := h.numaflowClient.Pipelines(ns).List(context.Background(), metav1.ListOptions{}) + pipelines, err := h.numaflowClient.Pipelines(ns).List(c, metav1.ListOptions{}) if err != nil { h.respondWithError(c, fmt.Sprintf("Failed to get pipelines in namespace %q, %s", ns, err.Error())) return @@ -698,7 +725,7 @@ func (h *handler) DeleteInterStepBufferService(c *gin.Context) { } } - err = h.numaflowClient.InterStepBufferServices(ns).Delete(context.Background(), isbsvcName, metav1.DeleteOptions{}) + err = h.numaflowClient.InterStepBufferServices(ns).Delete(c, isbsvcName, metav1.DeleteOptions{}) if err != nil { h.respondWithError(c, fmt.Sprintf("Failed to delete the interstep buffer service: namespace %q isb-service %q: %s", ns, isbsvcName, err.Error())) @@ -718,7 +745,7 @@ func (h *handler) ListPipelineBuffers(c *gin.Context) { return } - buffers, err := client.ListPipelineBuffers(context.Background(), pipeline) + buffers, err := client.ListPipelineBuffers(c, pipeline) if err != nil { h.respondWithError(c, fmt.Sprintf("Failed to get the Inter-Step buffers for pipeline %q: %s", pipeline, err.Error())) return @@ -737,7 +764,7 @@ func (h *handler) GetPipelineWatermarks(c *gin.Context) { return } - watermarks, err := 
client.GetPipelineWatermarks(context.Background(), pipeline) + watermarks, err := client.GetPipelineWatermarks(c, pipeline) if err != nil { h.respondWithError(c, fmt.Sprintf("Failed to get the watermarks for pipeline %q: %s", pipeline, err.Error())) return @@ -766,7 +793,7 @@ func (h *handler) UpdateVertex(c *gin.Context) { dryRun = strings.EqualFold("true", c.DefaultQuery("dry-run", "false")) ) - oldPipelineSpec, err := h.numaflowClient.Pipelines(ns).Get(context.Background(), pipeline, metav1.GetOptions{}) + oldPipelineSpec, err := h.numaflowClient.Pipelines(ns).Get(c, pipeline, metav1.GetOptions{}) if err != nil { h.respondWithError(c, fmt.Sprintf("Failed to update the vertex: namespace %q pipeline %q vertex %q: %s", ns, pipeline, inputVertexName, err.Error())) @@ -804,7 +831,7 @@ func (h *handler) UpdateVertex(c *gin.Context) { } oldPipelineSpec.Spec = newPipelineSpec.Spec - if _, err := h.numaflowClient.Pipelines(ns).Update(context.Background(), oldPipelineSpec, metav1.UpdateOptions{}); err != nil { + if _, err := h.numaflowClient.Pipelines(ns).Update(c, oldPipelineSpec, metav1.UpdateOptions{}); err != nil { h.respondWithError(c, fmt.Sprintf("Failed to update the vertex: namespace %q pipeline %q vertex %q: %s", ns, pipeline, inputVertexName, err.Error())) return @@ -817,7 +844,7 @@ func (h *handler) UpdateVertex(c *gin.Context) { func (h *handler) GetVerticesMetrics(c *gin.Context) { ns, pipeline := c.Param("namespace"), c.Param("pipeline") - pl, err := h.numaflowClient.Pipelines(ns).Get(context.Background(), pipeline, metav1.GetOptions{}) + pl, err := h.numaflowClient.Pipelines(ns).Get(c, pipeline, metav1.GetOptions{}) if err != nil { h.respondWithError(c, fmt.Sprintf("Failed to get the vertices metrics: namespace %q pipeline %q: %s", ns, pipeline, err.Error())) return @@ -831,7 +858,7 @@ func (h *handler) GetVerticesMetrics(c *gin.Context) { var results = make(map[string][]*daemon.VertexMetrics) for _, vertex := range pl.Spec.Vertices { - metrics, err := 
client.GetVertexMetrics(context.Background(), pipeline, vertex.Name) + metrics, err := client.GetVertexMetrics(c, pipeline, vertex.Name) if err != nil { h.respondWithError(c, fmt.Sprintf("Failed to get the vertices metrics: namespace %q pipeline %q vertex %q: %s", ns, pipeline, vertex.Name, err.Error())) return @@ -847,7 +874,7 @@ func (h *handler) ListVertexPods(c *gin.Context) { ns, pipeline, vertex := c.Param("namespace"), c.Param("pipeline"), c.Param("vertex") limit, _ := strconv.ParseInt(c.Query("limit"), 10, 64) - pods, err := h.kubeClient.CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{ + pods, err := h.kubeClient.CoreV1().Pods(ns).List(c, metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=%s,%s=%s", dfv1.KeyPipelineName, pipeline, dfv1.KeyVertexName, vertex), Limit: limit, Continue: c.Query("continue"), @@ -866,7 +893,7 @@ func (h *handler) ListPodsMetrics(c *gin.Context) { ns := c.Param("namespace") limit, _ := strconv.ParseInt(c.Query("limit"), 10, 64) - metrics, err := h.metricsClient.MetricsV1beta1().PodMetricses(ns).List(context.Background(), metav1.ListOptions{ + metrics, err := h.metricsClient.MetricsV1beta1().PodMetricses(ns).List(c, metav1.ListOptions{ Limit: limit, Continue: c.Query("continue"), }) @@ -936,7 +963,7 @@ func (h *handler) GetNamespaceEvents(c *gin.Context) { limit, _ := strconv.ParseInt(c.Query("limit"), 10, 64) var err error var events *corev1.EventList - if events, err = h.kubeClient.CoreV1().Events(ns).List(context.Background(), metav1.ListOptions{ + if events, err = h.kubeClient.CoreV1().Events(ns).List(c, metav1.ListOptions{ Limit: limit, Continue: c.Query("continue"), }); err != nil { @@ -992,7 +1019,7 @@ func (h *handler) GetPipelineStatus(c *gin.Context) { return } // Get the data criticality for the given pipeline - dataStatus, err := client.GetPipelineStatus(context.Background(), pipeline) + dataStatus, err := client.GetPipelineStatus(c, pipeline) if err != nil { h.respondWithError(c, fmt.Sprintf("Failed to 
get the dataStatus for pipeline %q: %s", pipeline, err.Error())) return @@ -1006,6 +1033,91 @@ func (h *handler) GetPipelineStatus(c *gin.Context) { c.JSON(http.StatusOK, NewNumaflowAPIResponse(nil, response)) } +// ListMonoVertices is used to provide all the mono vertices in a namespace. +func (h *handler) ListMonoVertices(c *gin.Context) { + ns := c.Param("namespace") + mvtList, err := getMonoVertices(h, ns) + if err != nil { + h.respondWithError(c, fmt.Sprintf("Failed to fetch all mono vertices for namespace %q, %s", ns, err.Error())) + return + } + c.JSON(http.StatusOK, NewNumaflowAPIResponse(nil, mvtList)) +} + +// GetMonoVertex is used to provide the spec of a given mono vertex +func (h *handler) GetMonoVertex(c *gin.Context) { + ns, monoVertex := c.Param("namespace"), c.Param("mono-vertex") + // get general mono vertex info + mvt, err := h.numaflowClient.MonoVertices(ns).Get(c, monoVertex, metav1.GetOptions{}) + if err != nil { + h.respondWithError(c, fmt.Sprintf("Failed to fetch mono vertex %q in namespace %q, %s", mvt, ns, err.Error())) + return + } + // set mono vertex kind and apiVersion + mvt.Kind = dfv1.MonoVertexGroupVersionKind.Kind + mvt.APIVersion = dfv1.SchemeGroupVersion.String() + // get mono vertex status + status, err := getMonoVertexStatus(mvt) + if err != nil { + h.respondWithError(c, fmt.Sprintf("Failed to fetch mono vertex %q from namespace %q, %s", monoVertex, ns, err.Error())) + return + } + monoVertexResp := NewMonoVertexInfo(status, mvt) + c.JSON(http.StatusOK, NewNumaflowAPIResponse(nil, monoVertexResp)) +} + +// CreateMonoVertex is used to create a mono vertex +func (h *handler) CreateMonoVertex(c *gin.Context) { + if h.opts.readonly { + errMsg := "Failed to perform this operation in read only mode" + c.JSON(http.StatusForbidden, NewNumaflowAPIResponse(&errMsg, nil)) + return + } + + ns := c.Param("namespace") + // dryRun is used to check if the operation is just a validation or an actual creation + dryRun := 
strings.EqualFold("true", c.DefaultQuery("dry-run", "false")) + + var monoVertexSpec dfv1.MonoVertex + if err := bindJson(c, &monoVertexSpec); err != nil { + h.respondWithError(c, fmt.Sprintf("Failed to decode JSON request body to mono vertex spec, %s", err.Error())) + return + } + + if requestedNs := monoVertexSpec.Namespace; !isValidNamespaceSpec(requestedNs, ns) { + h.respondWithError(c, fmt.Sprintf("namespace mismatch, expected %s, got %s", ns, requestedNs)) + return + } + monoVertexSpec.Namespace = ns + // if the validation flag "dryRun" is set to true, return without creating the pipeline + if dryRun { + c.JSON(http.StatusOK, NewNumaflowAPIResponse(nil, nil)) + return + } + if _, err := h.numaflowClient.MonoVertices(ns).Create(c, &monoVertexSpec, metav1.CreateOptions{}); err != nil { + h.respondWithError(c, fmt.Sprintf("Failed to create mono vertex %q, %s", monoVertexSpec.Name, err.Error())) + return + } + c.JSON(http.StatusOK, NewNumaflowAPIResponse(nil, nil)) +} + +// ListMonoVertexPods is used to provide all the pods of a mono vertex +func (h *handler) ListMonoVertexPods(c *gin.Context) { + ns, monoVertex := c.Param("namespace"), c.Param("mono-vertex") + limit, _ := strconv.ParseInt(c.Query("limit"), 10, 64) + pods, err := h.kubeClient.CoreV1().Pods(ns).List(c, metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s", dfv1.KeyMonoVertexName, monoVertex), + Limit: limit, + Continue: c.Query("continue"), + }) + if err != nil { + h.respondWithError(c, fmt.Sprintf("Failed to get a list of pods: namespace %q mono vertex %q: %s", + ns, monoVertex, err.Error())) + return + } + c.JSON(http.StatusOK, NewNumaflowAPIResponse(nil, pods.Items)) +} + // getAllNamespaces is a utility used to fetch all the namespaces in the cluster // except the kube system namespaces func getAllNamespaces(h *handler) ([]string, error) { @@ -1062,6 +1174,24 @@ func getIsbServices(h *handler, namespace string) (ISBServices, error) { return isbList, nil } +// getMonoVertices is a utility 
used to fetch all the mono vertices in a given namespace +func getMonoVertices(h *handler, namespace string) (MonoVertices, error) { + mvtList, err := h.numaflowClient.MonoVertices(namespace).List(context.Background(), metav1.ListOptions{}) + if err != nil { + return nil, err + } + var resList MonoVertices + for _, mvt := range mvtList.Items { + status, err := getMonoVertexStatus(&mvt) + if err != nil { + return nil, err + } + resp := NewMonoVertexInfo(status, &mvt) + resList = append(resList, resp) + } + return resList, nil +} + // GetPipelineStatus is used to provide the status of a given pipeline // TODO(API): Change the Daemon service to return the consolidated status of the pipeline // to save on multiple calls to the daemon service @@ -1092,6 +1222,11 @@ func getIsbServiceStatus(isbsvc *dfv1.InterStepBufferService) (string, error) { return retStatus, nil } +func getMonoVertexStatus(mvt *dfv1.MonoVertex) (string, error) { + // TODO - add more logic to determine the status of a mono vertex + return dfv1.MonoVertexStatusHealthy, nil +} + // validatePipelineSpec is used to validate the pipeline spec during create and update func validatePipelineSpec(h *handler, oldPipeline *dfv1.Pipeline, newPipeline *dfv1.Pipeline, validType string) error { ns := newPipeline.Namespace diff --git a/server/apis/v1/response_cluster_summary.go b/server/apis/v1/response_cluster_summary.go index fd7b033330..4d89c47e45 100644 --- a/server/apis/v1/response_cluster_summary.go +++ b/server/apis/v1/response_cluster_summary.go @@ -59,6 +59,16 @@ func (is *IsbServiceSummary) hasIsbService() bool { return is.Inactive > 0 || !is.Active.isEmpty() } +// MonoVertexSummary summarizes the number of active and inactive mono vertices. 
+type MonoVertexSummary struct { + Active ActiveStatus `json:"active"` + Inactive int `json:"inactive"` +} + +func (mvs *MonoVertexSummary) hasMonoVertex() bool { + return mvs.Inactive > 0 || !mvs.Active.isEmpty() +} + // ClusterSummaryResponse is a list of NamespaceSummary // of all the namespaces in a cluster wrapped in a list. type ClusterSummaryResponse []NamespaceSummary @@ -72,15 +82,20 @@ type NamespaceSummary struct { Namespace string `json:"namespace"` PipelineSummary PipelineSummary `json:"pipelineSummary"` IsbServiceSummary IsbServiceSummary `json:"isbServiceSummary"` + MonoVertexSummary MonoVertexSummary `json:"monoVertexSummary"` } // NewNamespaceSummary creates a new NamespaceSummary object with the given specifications. -func NewNamespaceSummary(namespace string, pipelineSummary PipelineSummary, - isbSummary IsbServiceSummary) NamespaceSummary { +func NewNamespaceSummary( + namespace string, + pipelineSummary PipelineSummary, + isbSummary IsbServiceSummary, + monoVertexSummary MonoVertexSummary) NamespaceSummary { return NamespaceSummary{ - IsEmpty: !(pipelineSummary.hasPipeline() || isbSummary.hasIsbService()), + IsEmpty: !(pipelineSummary.hasPipeline() || isbSummary.hasIsbService() || monoVertexSummary.hasMonoVertex()), Namespace: namespace, PipelineSummary: pipelineSummary, IsbServiceSummary: isbSummary, + MonoVertexSummary: monoVertexSummary, } } diff --git a/server/apis/v1/response_mono_vertex.go b/server/apis/v1/response_mono_vertex.go new file mode 100644 index 0000000000..290db26917 --- /dev/null +++ b/server/apis/v1/response_mono_vertex.go @@ -0,0 +1,39 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" + +// MonoVertices is a list of mono vertices +type MonoVertices []MonoVertexInfo + +type MonoVertexInfo struct { + Name string `json:"name"` + // Status shows whether the mono vertex is healthy, warning, critical or inactive. + Status string `json:"status"` + // MonoVertex contains the detailed mono vertex spec. + MonoVertex v1alpha1.MonoVertex `json:"monoVertex"` +} + +// NewMonoVertexInfo creates a new MonoVertexInfo object with the given status and lag +func NewMonoVertexInfo(status string, mvt *v1alpha1.MonoVertex) MonoVertexInfo { + return MonoVertexInfo{ + Name: mvt.Name, + Status: status, + MonoVertex: *mvt, + } +} diff --git a/server/apis/v1/response_pipeline.go b/server/apis/v1/response_pipeline.go index 0a091c4cd7..d7c0cb0902 100644 --- a/server/apis/v1/response_pipeline.go +++ b/server/apis/v1/response_pipeline.go @@ -31,7 +31,7 @@ type PipelineInfo struct { Pipeline v1alpha1.Pipeline `json:"pipeline"` } -// NewPipelineInfo creates a new PipelineInfo object with the given status +// NewPipelineInfo creates a new PipelineInfo object with the given status and lag func NewPipelineInfo(status string, lag *int64, pl *v1alpha1.Pipeline) PipelineInfo { return PipelineInfo{ Name: pl.Name, diff --git a/server/authz/consts.go b/server/authz/consts.go index d141a5b064..4f43fb1129 100644 --- a/server/authz/consts.go +++ b/server/authz/consts.go @@ -20,15 +20,16 @@ const ( // PolicyMapPath is the path to the policy map. 
policyMapPath = "/etc/numaflow/rbac-policy.csv" - // rbacPropertiesPath is the path to the rbac properties file. It includes configuraion for authorization like + // rbacPropertiesPath is the path to the rbac properties file. It includes configuration for authorization like // scope, default policy etc. rbacPropertiesPath = "/etc/numaflow/rbac-conf.yaml" // Objects for the RBAC policy - ObjectAll = "*" - ObjectPipeline = "pipeline" - ObjectISBSvc = "isbsvc" - ObjectEvents = "events" + ObjectAll = "*" + ObjectPipeline = "pipeline" + ObjectMonoVertex = "mono-vertex" + ObjectISBSvc = "isbsvc" + ObjectEvents = "events" // Resouces for the RBAC policy ResourceAll = "*" diff --git a/server/cmd/server/start.go b/server/cmd/server/start.go index 24f8e25179..eccfca45a3 100644 --- a/server/cmd/server/start.go +++ b/server/cmd/server/start.go @@ -200,5 +200,9 @@ func CreateAuthRouteMap(baseHref string) authz.RouteMap { "GET:" + baseHref + "api/v1/metrics/namespaces/:namespace/pods": authz.NewRouteInfo(authz.ObjectPipeline, true), "GET:" + baseHref + "api/v1/namespaces/:namespace/pods/:pod/logs": authz.NewRouteInfo(authz.ObjectPipeline, true), "GET:" + baseHref + "api/v1/namespaces/:namespace/events": authz.NewRouteInfo(authz.ObjectEvents, true), + "GET:" + baseHref + "api/v1/namespaces/:namespace/mono-vertices": authz.NewRouteInfo(authz.ObjectMonoVertex, true), + "GET:" + baseHref + "api/v1/namespaces/:namespace/mono-vertices/:mono-vertex": authz.NewRouteInfo(authz.ObjectMonoVertex, true), + "GET:" + baseHref + "api/v1/namespaces/:namespace/mono-vertices/:mono-vertex/pods": authz.NewRouteInfo(authz.ObjectMonoVertex, true), + "POST:" + baseHref + "api/v1/namespaces/:namespace/mono-vertices": authz.NewRouteInfo(authz.ObjectMonoVertex, true), } } diff --git a/server/cmd/server/start_test.go b/server/cmd/server/start_test.go index 645e53a3ff..1df0d5b8f9 100644 --- a/server/cmd/server/start_test.go +++ b/server/cmd/server/start_test.go @@ -25,12 +25,12 @@ import ( func 
TestCreateAuthRouteMap(t *testing.T) { t.Run("empty base", func(t *testing.T) { got := CreateAuthRouteMap("") - assert.Equal(t, 24, len(got)) + assert.Equal(t, 28, len(got)) }) t.Run("customize base", func(t *testing.T) { got := CreateAuthRouteMap("abcdefg") - assert.Equal(t, 24, len(got)) + assert.Equal(t, 28, len(got)) for k := range got { assert.Contains(t, k, "abcdefg") } diff --git a/server/routes/routes.go b/server/routes/routes.go index 580a819079..deb0cbcb6e 100644 --- a/server/routes/routes.go +++ b/server/routes/routes.go @@ -115,9 +115,9 @@ func v1Routes(ctx context.Context, r gin.IRouter, dexObj *v1.DexObject, localUse r.GET("/cluster-summary", handler.GetClusterSummary) // Create a Pipeline. r.POST("/namespaces/:namespace/pipelines", handler.CreatePipeline) - // All pipelines for a given namespace. + // List all pipelines for a given namespace. r.GET("/namespaces/:namespace/pipelines", handler.ListPipelines) - // Get a Pipeline information. + // Get the pipeline information. r.GET("/namespaces/:namespace/pipelines/:pipeline", handler.GetPipeline) // Get a Pipeline health information. r.GET("/namespaces/:namespace/pipelines/:pipeline/health", handler.GetPipelineStatus) @@ -153,6 +153,14 @@ func v1Routes(ctx context.Context, r gin.IRouter, dexObj *v1.DexObject, localUse r.GET("/namespaces/:namespace/pods/:pod/logs", handler.PodLogs) // List of the Kubernetes events of a namespace. r.GET("/namespaces/:namespace/events", handler.GetNamespaceEvents) + // List all mono vertices for a given namespace. + r.GET("/namespaces/:namespace/mono-vertices", handler.ListMonoVertices) + // Get the mono vertex information. + r.GET("/namespaces/:namespace/mono-vertices/:mono-vertex", handler.GetMonoVertex) + // Get all the pods of a mono vertex. + r.GET("/namespaces/:namespace/mono-vertices/:mono-vertex/pods", handler.ListMonoVertexPods) + // Create a mono vertex. 
+ r.POST("/namespaces/:namespace/mono-vertices", handler.CreateMonoVertex) } // authMiddleware is the middleware for AuthN/AuthZ. diff --git a/test/api-e2e/api_test.go b/test/api-e2e/api_test.go index 819dd39261..ae927e51d4 100644 --- a/test/api-e2e/api_test.go +++ b/test/api-e2e/api_test.go @@ -122,7 +122,7 @@ func (s *APISuite) TestISBSVCReplica1() { assert.Contains(s.T(), deleteISBSVC, deleteISBSVCSuccessExpect) } -func (s *APISuite) TestPipeline0() { +func (s *APISuite) TestAPIsForIsbAndPipelineAndMonoVertex() { defer s.Given().When().UXServerPodPortForward(8145, 8443).TerminateAllPodPortForwards() namespaceBody := HTTPExpect(s.T(), "https://localhost:8145").GET("/api/v1/namespaces"). @@ -158,10 +158,20 @@ func (s *APISuite) TestPipeline0() { Status(200).Body().Raw() assert.Contains(s.T(), resumePipeline1, patchPipelineSuccessExpect) + // create a mono vertex + var mv1 v1alpha1.MonoVertex + err = json.Unmarshal(testMonoVertex1, &mv1) + assert.NoError(s.T(), err) + createMonoVertex := HTTPExpect(s.T(), "https://localhost:8145").POST(fmt.Sprintf("/api/v1/namespaces/%s/mono-vertices", Namespace)).WithJSON(mv1). + Expect(). + Status(200).Body().Raw() + var createMonoVertexSuccessExpect = `"data":null` + assert.Contains(s.T(), createMonoVertex, createMonoVertexSuccessExpect) + clusterSummaryBody := HTTPExpect(s.T(), "https://localhost:8145").GET("/api/v1/cluster-summary"). Expect(). 
Status(200).Body().Raw() - var clusterSummaryExpect = `{"isEmpty":false,"namespace":"numaflow-system","pipelineSummary":{"active":{"Healthy":2,"Warning":0,"Critical":0},"inactive":0},"isbServiceSummary":{"active":{"Healthy":1,"Warning":0,"Critical":0},"inactive":0}}` + var clusterSummaryExpect = `{"isEmpty":false,"namespace":"numaflow-system","pipelineSummary":{"active":{"Healthy":2,"Warning":0,"Critical":0},"inactive":0},"isbServiceSummary":{"active":{"Healthy":1,"Warning":0,"Critical":0},"inactive":0},"monoVertexSummary":{"active":{"Healthy":1,"Warning":0,"Critical":0},"inactive":0}}` assert.Contains(s.T(), clusterSummaryBody, clusterSummaryExpect) listPipelineBody := HTTPExpect(s.T(), "https://localhost:8145").GET(fmt.Sprintf("/api/v1/namespaces/%s/pipelines", Namespace)). @@ -179,9 +189,14 @@ func (s *APISuite) TestPipeline0() { var deletePipelineSuccessExpect = `"data":null` assert.Contains(s.T(), deletePipeline1, deletePipelineSuccessExpect) assert.Contains(s.T(), deletePipeline2, deletePipelineSuccessExpect) + + listMonoVertexBody := HTTPExpect(s.T(), "https://localhost:8145").GET(fmt.Sprintf("/api/v1/namespaces/%s/mono-vertices", Namespace)). + Expect(). 
+ Status(200).Body().Raw() + assert.Contains(s.T(), listMonoVertexBody, testMonoVertex1Name) } -func (s *APISuite) TestPipeline1() { +func (s *APISuite) TestAPIsForMetricsAndWatermarkAndPods() { ctx, cancel := context.WithTimeout(context.Background(), time.Minute) defer cancel() diff --git a/test/api-e2e/testdata.go b/test/api-e2e/testdata.go index 24623c54eb..5322b38ce8 100644 --- a/test/api-e2e/testdata.go +++ b/test/api-e2e/testdata.go @@ -141,4 +141,34 @@ var ( } } `) + testMonoVertex1Name = "test-mono-vertex-1" + testMonoVertex1 = []byte(` +{ + "apiVersion": "numaflow.numaproj.io/v1alpha1", + "kind": "MonoVertex", + "metadata": { + "name": "test-mono-vertex-1" + }, + "spec": { + "source": { + "udsource": { + "container": { + "image": "quay.io/numaio/numaflow-java/source-simple-source:stable" + } + }, + "transformer": { + "container": { + "image": "quay.io/numaio/numaflow-rs/source-transformer-now:stable" + } + } + }, + "sink": { + "udsink": { + "container": { + "image": "quay.io/numaio/numaflow-java/simple-sink:stable" + } + } + } + } +}`) ) From 7f44c4330260e6c771d0ffb55e64d328318e123f Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Wed, 14 Aug 2024 09:33:58 +0530 Subject: [PATCH 21/23] chore: update last updated time for mvtx (#1939) Signed-off-by: Yashash H L --- pkg/reconciler/monovertex/controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/reconciler/monovertex/controller.go b/pkg/reconciler/monovertex/controller.go index 3688d87b60..20246b8f9c 100644 --- a/pkg/reconciler/monovertex/controller.go +++ b/pkg/reconciler/monovertex/controller.go @@ -75,7 +75,7 @@ func (mr *monoVertexReconciler) Reconcile(ctx context.Context, req ctrl.Request) if err != nil { log.Errorw("Reconcile error", zap.Error(err)) } - + monoVtxCopy.Status.LastUpdated = metav1.Now() if !equality.Semantic.DeepEqual(monoVtx.Status, monoVtxCopy.Status) { if err := mr.client.Status().Update(ctx, monoVtxCopy); err != nil { return reconcile.Result{}, err From 
b05ad789019cd101d54521760b98e2360e8ba0b7 Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Wed, 14 Aug 2024 09:54:45 +0530 Subject: [PATCH 22/23] chore: avoid partial acks (#1938) Signed-off-by: Yashash H L Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- rust/monovertex/src/forwarder.rs | 36 +++++++++++++++----------------- rust/monovertex/src/lib.rs | 10 ++++++--- rust/monovertex/src/metrics.rs | 11 ++++++---- 3 files changed, 31 insertions(+), 26 deletions(-) diff --git a/rust/monovertex/src/forwarder.rs b/rust/monovertex/src/forwarder.rs index 014789b7b1..e2dcd6b3d7 100644 --- a/rust/monovertex/src/forwarder.rs +++ b/rust/monovertex/src/forwarder.rs @@ -1,11 +1,3 @@ -use chrono::Utc; -use metrics::counter; -use std::collections::HashMap; -use tokio::task::JoinSet; -use tokio::time::sleep; -use tokio_util::sync::CancellationToken; -use tracing::info; -use tracing::log::warn; use crate::config::config; use crate::error::{Error, Result}; use crate::message::Offset; @@ -16,6 +8,14 @@ use crate::metrics::{ use crate::sink::{proto, SinkClient}; use crate::source::SourceClient; use crate::transformer::TransformerClient; +use chrono::Utc; +use metrics::counter; +use std::collections::HashMap; +use tokio::task::JoinSet; +use tokio::time::sleep; +use tokio_util::sync::CancellationToken; +use tracing::info; +use tracing::log::warn; const MONO_VERTEX_TYPE: &str = "mono_vertex"; @@ -77,6 +77,9 @@ impl Forwarder { let messages = result?; info!("Read batch size: {} and latency - {}ms", messages.len(), start_time.elapsed().as_millis()); + // collect all the offsets as the transformer can drop (via filter) messages + let offsets = messages.iter().map(|msg| msg.offset.clone()).collect::<Vec<Offset>>(); + messages_count += messages.len() as u64; let bytes_count = messages.iter().map(|msg| msg.value.len() as u64).sum::<u64>(); counter!(FORWARDER_READ_TOTAL, &self.common_labels).increment(messages_count); @@ -122,18 +125,8 @@ impl Forwarder { .map(|result| result.id.clone()) 
.collect(); - let successful_offsets: Vec<Offset> = retry_messages.iter() .filter(|msg| !failed_ids.contains(&msg.id)) .map(|msg| msg.offset.clone()) .collect(); - - - // ack the successful offsets - let n = successful_offsets.len(); - self.source_client.ack_fn(successful_offsets).await?; - counter!(FORWARDER_WRITE_TOTAL, &self.common_labels).increment(n as u64); attempts += 1; - + if failed_ids.is_empty() { break; } else { @@ -161,6 +154,11 @@ impl Forwarder { ))); } + // Acknowledge the messages back to the source + let start_time = tokio::time::Instant::now(); + self.source_client.ack_fn(offsets).await?; + info!("Ack latency - {}ms", start_time.elapsed().as_millis()); + counter!(FORWARDER_ACK_TOTAL, &self.common_labels).increment(messages_count); } } diff --git a/rust/monovertex/src/lib.rs b/rust/monovertex/src/lib.rs index c4d6bf21bf..823ffc6870 100644 --- a/rust/monovertex/src/lib.rs +++ b/rust/monovertex/src/lib.rs @@ -2,7 +2,7 @@ pub(crate) use self::error::Result; use crate::config::config; pub(crate) use crate::error::Error; use crate::forwarder::Forwarder; -use crate::metrics::{start_metrics_https_server, MetricsState, LagReaderBuilder}; +use crate::metrics::{start_metrics_https_server, LagReaderBuilder, MetricsState}; use crate::sink::{SinkClient, SinkConfig}; use crate::source::{SourceClient, SourceConfig}; use crate::transformer::{TransformerClient, TransformerConfig}; @@ -182,8 +182,12 @@ pub async fn init( // start the lag reader to publish lag metrics let mut lag_reader = LagReaderBuilder::new(source_client.clone()) - .lag_checking_interval(Duration::from_secs(config().lag_check_interval_in_secs.into())) - .refresh_interval(Duration::from_secs(config().lag_refresh_interval_in_secs.into())) + .lag_checking_interval(Duration::from_secs( + config().lag_check_interval_in_secs.into(), + )) + .refresh_interval(Duration::from_secs( + config().lag_refresh_interval_in_secs.into(), + )) .build(); lag_reader.start().await; diff --git 
a/rust/monovertex/src/metrics.rs b/rust/monovertex/src/metrics.rs index 69782e2c32..f3d5421dbc 100644 --- a/rust/monovertex/src/metrics.rs +++ b/rust/monovertex/src/metrics.rs @@ -107,7 +107,7 @@ pub(crate) async fn start_metrics_https_server( /// router for metrics and k8s health endpoints fn metrics_router(recorder_handle: PrometheusHandle, metrics_state: MetricsState) -> Router { - Router::new() + Router::new() .route("/metrics", get(move || ready(recorder_handle.render()))) .route("/livez", get(livez)) .route("/readyz", get(readyz)) @@ -197,7 +197,6 @@ pub(crate) struct LagReader { pending_stats: Arc<Mutex<Vec<i64>>>, } - /// LagReaderBuilder is used to build a `LagReader` instance. pub(crate) struct LagReaderBuilder { source_client: SourceClient, @@ -227,8 +226,12 @@ impl LagReaderBuilder { pub(crate) fn build(self) -> LagReader { LagReader { source_client: self.source_client, - lag_checking_interval: self.lag_checking_interval.unwrap_or_else(|| Duration::from_secs(3)), - refresh_interval: self.refresh_interval.unwrap_or_else(|| Duration::from_secs(5)), + lag_checking_interval: self + .lag_checking_interval + .unwrap_or_else(|| Duration::from_secs(3)), + refresh_interval: self + .refresh_interval + .unwrap_or_else(|| Duration::from_secs(5)), buildup_handle: None, expose_handle: None, pending_stats: Arc::new(Mutex::new(Vec::with_capacity(MAX_PENDING_STATS))), From 96784f90d9e5c851faf32bcc8ab2059880aeba36 Mon Sep 17 00:00:00 2001 From: Saniya Kalamkar Date: Wed, 14 Aug 2024 13:34:39 -0700 Subject: [PATCH 23/23] feat: Using limit to calculate usage percentage. 
Fixes #1784 Signed-off-by: Saniya Kalamkar --- ui/src/utils/fetcherHooks/podsViewFetch.ts | 2 -- 1 file changed, 2 deletions(-) diff --git a/ui/src/utils/fetcherHooks/podsViewFetch.ts b/ui/src/utils/fetcherHooks/podsViewFetch.ts index ab9efa8b3d..179ddbf17d 100644 --- a/ui/src/utils/fetcherHooks/podsViewFetch.ts +++ b/ui/src/utils/fetcherHooks/podsViewFetch.ts @@ -49,7 +49,6 @@ export const usePodsViewFetch = ( const containers: string[] = []; const containerSpecMap = new Map(); pod?.spec?.containers?.forEach((container: any) => { - ///const cpu = container?.resources?.requests?.cpu; const cpu = container?.resources?.limits?.cpu; let cpuParsed: undefined | number; if (cpu) { @@ -59,7 +58,6 @@ export const usePodsViewFetch = ( cpuParsed = undefined; } } - //const memory = container?.resources?.requests?.memory; const memory = container?.resources?.limits?.memory; let memoryParsed: undefined | number; if (memory) {