diff --git a/cmd/controller/app/server.go b/cmd/controller/app/server.go index 75b4e385f..4bec1c2f7 100644 --- a/cmd/controller/app/server.go +++ b/cmd/controller/app/server.go @@ -14,7 +14,6 @@ import ( actionv1beta1 "github.com/stolostron/cluster-lifecycle-api/action/v1beta1" clusterinfov1beta1 "github.com/stolostron/cluster-lifecycle-api/clusterinfo/v1beta1" "github.com/stolostron/cluster-lifecycle-api/imageregistry/v1alpha1" - inventoryv1alpha1 "github.com/stolostron/cluster-lifecycle-api/inventory/v1alpha1" "github.com/stolostron/multicloud-operators-foundation/cmd/controller/app/options" "github.com/stolostron/multicloud-operators-foundation/pkg/addon" "github.com/stolostron/multicloud-operators-foundation/pkg/cache" @@ -30,7 +29,6 @@ import ( "github.com/stolostron/multicloud-operators-foundation/pkg/controllers/clusterset/syncrolebinding" "github.com/stolostron/multicloud-operators-foundation/pkg/controllers/gc" "github.com/stolostron/multicloud-operators-foundation/pkg/controllers/imageregistry" - "github.com/stolostron/multicloud-operators-foundation/pkg/controllers/inventory" "github.com/stolostron/multicloud-operators-foundation/pkg/helpers" "github.com/stolostron/multicloud-operators-foundation/pkg/utils" corev1 "k8s.io/api/core/v1" @@ -61,7 +59,6 @@ var ( func init() { _ = clientgoscheme.AddToScheme(scheme) - _ = inventoryv1alpha1.AddToScheme(scheme) _ = hiveinternalv1alpha1.AddToScheme(scheme) _ = hivev1.AddToScheme(scheme) _ = clusterinfov1beta1.AddToScheme(scheme) @@ -201,14 +198,6 @@ func Run(o *options.ControllerRunOptions, ctx context.Context) error { } } - // Setup reconciler - if o.EnableInventory { - if err = inventory.SetupWithManager(mgr); err != nil { - klog.Errorf("unable to setup inventory reconciler: %v", err) - return err - } - } - if err = clusterinfo.SetupWithManager(mgr, o.LogCertSecret); err != nil { klog.Errorf("unable to setup clusterInfo reconciler: %v", err) return err diff --git a/deploy/foundation/hub/resources/clusterrole.yaml b/deploy/foundation/hub/resources/clusterrole.yaml index 0647367fe..8714cd0ab 100644 --- a/deploy/foundation/hub/resources/clusterrole.yaml +++ b/deploy/foundation/hub/resources/clusterrole.yaml @@ -75,12 +75,6 @@ rules: - apiGroups: ["hive.openshift.io"] resources: [ "syncsets"] verbs: ["create", "update", "delete"] - - apiGroups: ["inventory.open-cluster-management.io"] - resources: ["baremetalassets", "baremetalassets/status"] - verbs: ["get", "list", "watch", "update", "delete"] - - apiGroups: ["inventory.open-cluster-management.io"] - resources: ["baremetalassets/finalizers"] - verbs: ["update"] - apiGroups: ["coordination.k8s.io"] resources: ["leases"] verbs: ["get", "create", "update"] diff --git a/deploy/foundation/hub/resources/crds/inventory.open-cluster-management.io_baremetalassets.crd.yaml b/deploy/foundation/hub/resources/crds/inventory.open-cluster-management.io_baremetalassets.crd.yaml deleted file mode 100644 index f3dde09ce..000000000 --- a/deploy/foundation/hub/resources/crds/inventory.open-cluster-management.io_baremetalassets.crd.yaml +++ /dev/null @@ -1,150 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.0 - creationTimestamp: null - name: baremetalassets.inventory.open-cluster-management.io -spec: - group: inventory.open-cluster-management.io - names: - kind: BareMetalAsset - listKind: BareMetalAssetList - plural: baremetalassets - singular: baremetalasset - scope: Namespaced - preserveUnknownFields: 
false - versions: - - name: v1alpha1 - deprecated: true - deprecationWarning: "inventory.open-cluster-management.io/v1alpha1 BareMetalAsset is deprecated" - schema: - openAPIV3Schema: - description: BareMetalAsset is the Schema for the baremetalassets API - type: object - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: BareMetalAssetSpec defines the desired state of BareMetalAsset - type: object - properties: - bmc: - description: How do we connect to the BMC? - type: object - properties: - address: - description: Address holds the URL for accessing the controller on the network. - type: string - credentialsName: - description: The name of the secret containing the BMC credentials (requires keys "username" and "password"). - type: string - bootMACAddress: - description: Which MAC address will PXE boot? This is optional for some types, but required for libvirt VMs driven by vbmc. - type: string - pattern: '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}' - clusterDeployment: - description: ClusterDeployment which the asset belongs to. - type: object - x-kubernetes-preserve-unknown-fields: true - hardwareProfile: - description: What is the name of the hardware profile for this host? It should only be necessary to set this when inspection cannot automatically determine the profile. - type: string - role: - description: Role holds the role of the asset - type: string - enum: - - master - - worker - status: - description: BareMetalAssetStatus defines the observed state of BareMetalAsset - type: object - properties: - conditions: - description: Conditions describes the state of the BareMetalAsset resource. - type: array - items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - type: object - required: - - lastTransitionTime - - message - - reason - - status - - type - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - type: string - format: date-time - message: - description: message is a human readable message indicating details about the transition. This may be an empty string. 
- type: string - maxLength: 32768 - observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. - type: integer - format: int64 - minimum: 0 - reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. - type: string - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - status: - description: status of the condition, one of True, False, Unknown. - type: string - enum: - - "True" - - "False" - - Unknown - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - type: string - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - relatedObjects: - description: RelatedObjects is a list of objects created and maintained by this operator. Object references will be added to this list after they have been created AND found in the cluster. - type: array - items: - description: 'ObjectReference contains enough information to let you inspect or modify the referred object. --- New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". Those cannot be well described when embedded. 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple and the version of the actual struct is irrelevant. 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type will affect numerous schemas. Don''t make new APIs embed an underspecified API type they do not control. Instead of using this type, create a locally provided and used type that is well-focused on your reference. For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .' - type: object - properties: - apiVersion: - description: API version of the referent. 
-                      type: string
-                    fieldPath:
-                      description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.'
-                      type: string
-                    kind:
-                      description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
-                      type: string
-                    name:
-                      description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
-                      type: string
-                    namespace:
-                      description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
-                      type: string
-                    resourceVersion:
-                      description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
-                      type: string
-                    uid:
-                      description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
-                      type: string
-    served: true
-    storage: true
-    subresources:
-      status: {}
-status:
-  acceptedNames:
-    kind: ""
-    plural: ""
-  conditions: []
-  storedVersions: []
diff --git a/deploy/foundation/hub/resources/kustomization.yaml b/deploy/foundation/hub/resources/kustomization.yaml
index 0ffcf4af5..2372110b2 100644
--- a/deploy/foundation/hub/resources/kustomization.yaml
+++ b/deploy/foundation/hub/resources/kustomization.yaml
@@ -2,7 +2,6 @@ resources:
 - crds/action.open-cluster-management.io_managedclusteractions.crd.yaml
 - crds/internal.open-cluster-management.io_managedclusterinfos.crd.yaml
 - crds/imageregistry.open-cluster-management.io_managedclusterimageregistries.crd.yaml
-- crds/inventory.open-cluster-management.io_baremetalassets.crd.yaml
 - crds/view.open-cluster-management.io_managedclusterviews.crd.yaml
 - crds/hive.openshift.io_syncsets.yaml
 - crds/hive.openshift.io_clusterdeployments.yaml
diff --git a/docs/inventory/FAQs.md b/docs/inventory/FAQs.md
deleted file mode 100644
index 59a54f29d..000000000
--- a/docs/inventory/FAQs.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Frequently Asked Questions
-
-## What problem does the inventory controller solve?
-
-We assume that each customer has their own set of tools (CSV files, spreadsheets, CMDB, etc.) to manage inventory data, and that there is no common data format used to store it. This operator provides an API that lets customers bring inventory data from their existing systems, in whatever format, into the multi-cluster inventory system so that it is available for use in baremetal clusters.
-
-It provides a CRD called BareMetalAsset that is used to hold the inventory assets in the hub cluster, and a controller that reconciles the assets with resources in the managed cluster. That means each customer will need a custom tool outside of ACM to take inventory data from their existing systems and convert it into the format that the BMA CRD expects.
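-
-For illustration only, such a conversion tool might look roughly like the sketch below. This is a minimal, hypothetical example (the CSV layout, the `default` namespace, and Go field names such as `BootMACAddress` are assumptions), not a supported interface:
-
-```go
-package main
-
-import (
-	"context"
-	"encoding/csv"
-	"os"
-
-	inventoryv1alpha1 "github.com/stolostron/cluster-lifecycle-api/inventory/v1alpha1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/client/config"
-)
-
-func main() {
-	// Register the inventory API types so the client can encode BareMetalAssets.
-	scheme := runtime.NewScheme()
-	_ = inventoryv1alpha1.AddToScheme(scheme)
-
-	cfg, err := config.GetConfig()
-	if err != nil {
-		panic(err)
-	}
-	c, err := client.New(cfg, client.Options{Scheme: scheme})
-	if err != nil {
-		panic(err)
-	}
-
-	// Assumed CSV layout: name,bmcAddress,secretName,bootMAC
-	f, err := os.Open("inventory.csv")
-	if err != nil {
-		panic(err)
-	}
-	defer f.Close()
-	rows, err := csv.NewReader(f).ReadAll()
-	if err != nil {
-		panic(err)
-	}
-
-	// Create one BareMetalAsset per CSV row on the hub cluster.
-	for _, row := range rows {
-		bma := &inventoryv1alpha1.BareMetalAsset{
-			ObjectMeta: metav1.ObjectMeta{Name: row[0], Namespace: "default"},
-		}
-		bma.Spec.BMC.Address = row[1]
-		bma.Spec.BMC.CredentialsName = row[2]
-		bma.Spec.BootMACAddress = row[3]
-		if err := c.Create(context.TODO(), bma); err != nil {
-			panic(err)
-		}
-	}
-}
-```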
-
-The customer will likely do a bulk-import of inventory records into the multicluster-inventory as BareMetalAssets (BMA). These BMAs do not have to be part of a cluster just yet; they can simply live in the namespace. Once it is time to add the inventory to a cluster, the customer can assign a Role and ClusterDeployment to the asset, and the controller is responsible for updating the managed cluster with the corresponding BareMetalHost resources.
-
-## How does it fit into ACM?
-
-BareMetalAssets (BMAs) are added to the multicluster-inventory system either through the ACM UI or in bulk using the BareMetalAsset CRD.
-
-Once a user decides to create a cluster, they use the ACM UI to select assets from the available BMAs and update the role (either "worker" or "master") and clusterDeployment association (name and namespace) for each BMA. Addition or removal of assets from the cluster happens in a similar way.
-
-## Where does the inventory-controller live?
-
-The inventory controller is packaged as part of the controller operator. It can be enabled using the `--enable-inventory` flag.
-
-```bash
-./controller --enable-inventory
-```
diff --git a/docs/inventory/using-inventory.md b/docs/inventory/using-inventory.md
deleted file mode 100644
index fcdd45f8e..000000000
--- a/docs/inventory/using-inventory.md
+++ /dev/null
@@ -1,147 +0,0 @@
-# Using BareMetal Inventory Asset
-
-The inventory controller defines a CRD called BareMetalAsset, which is used to hold inventory records for use in baremetal clusters. The controller runs in the hub cluster. The assets are created in a namespace there, and the controller is responsible for reconciling each inventory asset with BareMetalHost resources in the managed cluster.
-
-## Create a new inventory asset
-
-A BareMetalAsset (BMA) represents the hardware available for use in baremetal clusters.
-
-Create a BareMetalAsset in the default (or any) namespace. Each BMA also has a corresponding Secret that contains the BMC credentials; the secret name is referenced by bma.bmc.credentialsName.
-
-```yaml
-apiVersion: v1
-kind: Secret
-metadata:
-  name: worker-0-bmc-secret
-type: Opaque
-data:
-  username: YWRtaW4=
-  password: cGFzc3dvcmQ=
-```
-
-```yaml
-apiVersion: inventory.open-cluster-management.io/v1alpha1
-kind: BareMetalAsset
-metadata:
-  name: baremetalasset-worker-0
-spec:
-  bmc:
-    address: ipmi://192.168.122.1:6233
-    credentialsName: worker-0-bmc-secret
-  bootMACAddress: "00:1B:44:11:3A:B7"
-  hardwareProfile: "hardwareProfile"
-```
-
-A look at the BareMetalAsset status shows that the referenced secret was found and that the BMA is not yet associated with any ClusterDeployment.
-
-```yaml
-status:
-  conditions:
-  - lastHeartbeatTime: "2020-02-26T19:01:42Z"
-    lastTransitionTime: "2020-02-26T19:01:42Z"
-    message: A secret with the name worker-0-bmc-secret in namespace default was found
-    reason: SecretFound
-    status: "True"
-    type: CredentialsFound
-  - lastHeartbeatTime: "2020-02-26T19:01:42Z"
-    lastTransitionTime: "2020-02-26T19:01:42Z"
-    message: No cluster deployment specified
-    reason: NoneSpecified
-    status: "False"
-    type: ClusterDeploymentFound
-```
-
-Also note that the BMA object metadata has no values populated for the following labels yet.
-
-```yaml
-metadata:
-  creationTimestamp: "2020-02-26T19:01:42Z"
-  finalizers:
-  - baremetalasset.inventory.open-cluster-management.io
-  generation: 2
-  labels:
-    metal3.io/cluster-deployment-name: ""
-    metal3.io/cluster-deployment-namespace: ""
-    metal3.io/role: ""
-```
-
-## Add inventory to the cluster
-
-Each BMA can have a role and be associated with a clusterDeployment. The role can be either "worker" or "master".
-
-Update the BMA spec with the role set to worker, and the clusterDeployment name and namespace set to the appropriate values of the Hive ClusterDeployment you want the BMA associated with. For example:
-
-```yaml
-  clusterDeployment:
-    name: cluster0
-    namespace: cluster0
-  role: worker
-```
-
-You will notice that the metadata labels are updated to their appropriate values. With the labels set, a management application can look for BMAs with a particular role and clusterDeployment and add them to the cluster.
-
-```yaml
-apiVersion: inventory.open-cluster-management.io/v1alpha1
-kind: BareMetalAsset
-metadata:
-  creationTimestamp: "2020-02-26T19:01:42Z"
-  finalizers:
-  - baremetalasset.inventory.open-cluster-management.io
-  generation: 4
-  labels:
-    metal3.io/cluster-deployment-name: cluster0
-    metal3.io/cluster-deployment-namespace: cluster0
-    metal3.io/role: worker
-  name: baremetalasset-worker-0
-  namespace: default
-  resourceVersion: "23751"
-  selfLink: /apis/inventory.open-cluster-management.io/v1alpha1/namespaces/default/baremetalassets/baremetalasset-worker-0
-  uid: dd83e7c1-2882-4aa8-bb6f-a36cb428896c
-spec:
-  bmc:
-    address: ipmi://192.168.122.1:6233
-    credentialsName: worker-0-bmc-secret
-  bootMACAddress: 00:1B:44:11:3A:B7
-  clusterDeployment:
-    creationTimestamp: null
-    name: cluster0
-    namespace: cluster0
-  hardwareProfile: hardwareProfile
-  role: worker
-```
-
-Once an asset is associated with a clusterDeployment, the controller creates a Hive SyncSet for each BMA in the namespace of the clusterDeployment. The inventory controller maps the BareMetalAsset to a corresponding BareMetalHost resource in the SyncSet, and the SyncSet is responsible for syncing the asset to a BareMetalHost resource in the managed cluster.
-
-Look at the BareMetalAsset status conditions to view information about the success or failure of the operations.
-
-```bash
-kubectl get baremetalassets baremetalasset-worker-0 -o yaml
-```
-
-You can also directly look at the SyncSets and SyncSetInstances created by the controller.
-
-```bash
-kubectl get syncsets -n cluster0 -o yaml
-kubectl get syncsetinstances -n cluster0 -o yaml
-```
-
-You can verify that the corresponding Secret and BareMetalHost resources are created on the managed cluster in the openshift-machine-api namespace.
-
-```bash
-kubectl get secrets worker-0-bmc-secret -n openshift-machine-api
-kubectl get baremetalhosts baremetalasset-worker-0 -n openshift-machine-api
-```
-
-## Remove inventory from the cluster
-
-To remove an asset from a cluster, simply remove or empty the name and namespace values for clusterDeployment. The SyncSet will be deleted by the controller, and the BareMetalHost for the asset on the managed cluster will be deprovisioned and removed from the cluster.
-
-```yaml
-  clusterDeployment:
-    name: ""
-    namespace: ""
-```
-
-## Update Host information
-
-Any information in the BMA spec, like credentials, bootMACAddress, etc., can be updated, and the controller will deliver the updates to the managed clusters.
\ No newline at end of file diff --git a/go.mod b/go.mod index f5841cabb..96fdf21b2 100644 --- a/go.mod +++ b/go.mod @@ -54,13 +54,11 @@ require ( github.com/go-logr/logr v1.2.3 github.com/gogo/protobuf v1.3.2 github.com/mattbaird/jsonpatch v0.0.0-20200820163806-098863c1fc24 - github.com/metal3-io/baremetal-operator/apis v0.0.0 github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega v1.19.0 github.com/openshift/api v0.0.0-20220531073726-6c4f186339a7 github.com/openshift/build-machinery-go v0.0.0-20220429084610-baff9f8d23b3 github.com/openshift/client-go v0.0.0-20220525160904-9e1acff93e4a - github.com/openshift/custom-resource-status v1.1.2 github.com/openshift/hive v1.1.17-0.20220726120844-e78dfd39116d github.com/openshift/hive/apis v0.0.0 github.com/openshift/library-go v0.0.0-20220525173854-9b950a41acdc @@ -68,7 +66,7 @@ require ( github.com/prometheus/common v0.32.1 github.com/smartystreets/goconvey v1.7.2 github.com/spf13/pflag v1.0.5 - github.com/stolostron/cluster-lifecycle-api v0.0.0-20220930080346-456dd8fcbea8 + github.com/stolostron/cluster-lifecycle-api v0.0.0-20221107031926-6f0a02d2aaf5 github.com/stretchr/testify v1.7.2 golang.org/x/net v0.0.0-20220722155237-a158d28d115b k8s.io/api v0.25.0 @@ -144,7 +142,6 @@ require ( github.com/jtolds/gls v4.20.0+incompatible // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect - github.com/metal3-io/baremetal-operator/pkg/hardwareutils v0.0.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect diff --git a/go.sum b/go.sum index 82d9b5c37..ebb266784 100644 --- a/go.sum +++ b/go.sum @@ -178,7 +178,6 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= @@ -280,7 +279,6 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= @@ -435,29 +433,21 @@ github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9k github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= 
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/openshift/api v0.0.0-20220525145417-ee5b62754c68/go.mod h1:LEnw1IVscIxyDnltE3Wi7bQb/QzIM8BfPNKoGA1Qlxw= github.com/openshift/api v0.0.0-20220531073726-6c4f186339a7 h1:bkBOsI/Yd+cBT+/aXkbbNo+imvq4VKRusoCluIGOBBg= github.com/openshift/api v0.0.0-20220531073726-6c4f186339a7/go.mod h1:LEnw1IVscIxyDnltE3Wi7bQb/QzIM8BfPNKoGA1Qlxw= -github.com/openshift/baremetal-operator/apis v0.0.0-20211201170610-92ffa60c683d h1:DHGXCvXWsPExutf3tgQYD4TVDSAOviLXO7Vnc42oXhw= -github.com/openshift/baremetal-operator/apis v0.0.0-20211201170610-92ffa60c683d/go.mod h1:CVSU+wS3oYrFJooMeiyDtTpatoXoKyXPE2YS5vT26vE= -github.com/openshift/baremetal-operator/pkg/hardwareutils v0.0.0-20211201170610-92ffa60c683d h1:ldAAKEQlOoIp8nqCnHyhVH5pMvgepP8gmN6Ve12ai7I= -github.com/openshift/baremetal-operator/pkg/hardwareutils v0.0.0-20211201170610-92ffa60c683d/go.mod h1:Q+r+xTc1jDcx/y61bVspJ9ANiAjJlsx/j+sL44mCB8w= github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= github.com/openshift/build-machinery-go v0.0.0-20220429084610-baff9f8d23b3 h1:M7ttIUk99wNgpBImKJEbE/QgmuRK+HZ4xUh8ZpH/r3Y= github.com/openshift/build-machinery-go v0.0.0-20220429084610-baff9f8d23b3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= github.com/openshift/client-go v0.0.0-20220525160904-9e1acff93e4a h1:ylsEgoC8Dlg4A0C1TLH0A4x/TZao7k1YveLwROhRUdk= github.com/openshift/client-go v0.0.0-20220525160904-9e1acff93e4a/go.mod h1:eDO5QeVi2IiXmDwB0e2z1DpAznWroZKe978pzZwFBzg= -github.com/openshift/custom-resource-status v1.1.2 h1:C3DL44LEbvlbItfd8mT5jWrqPfHnSOQoQf/sypqA6A4= -github.com/openshift/custom-resource-status v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA= github.com/openshift/hive v1.1.17-0.20220726120844-e78dfd39116d h1:KDVLSy/LC+Fwmv4tZRoEMJHkxFkbBeqdpZFXrEamsXc= github.com/openshift/hive v1.1.17-0.20220726120844-e78dfd39116d/go.mod h1:Allz11VtjQXgTzGsgRz5YvBH+F2bTD2wjECko8VVNzQ= github.com/openshift/hive/apis v0.0.0-20220726120844-e78dfd39116d h1:IePF0Xi/yLRN7bLXex12mLzWptYlE9HnQVoh2tVwkW0= @@ -556,8 +546,8 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= -github.com/stolostron/cluster-lifecycle-api v0.0.0-20220930080346-456dd8fcbea8 h1:uEQYpp/+UMqGK2Nxbr93YE7gWD8ndAJC5dxm0gzULJk= 
-github.com/stolostron/cluster-lifecycle-api v0.0.0-20220930080346-456dd8fcbea8/go.mod h1:pNeVzujoHsTHDloNHVfp1QPYlQy8MkXMuuZme96/x8M= +github.com/stolostron/cluster-lifecycle-api v0.0.0-20221107031926-6f0a02d2aaf5 h1:+6YaBfE6Zy7du0LOxrdMEp96sXrNqlKU4kOBXxf5yqo= +github.com/stolostron/cluster-lifecycle-api v0.0.0-20221107031926-6f0a02d2aaf5/go.mod h1:pNeVzujoHsTHDloNHVfp1QPYlQy8MkXMuuZme96/x8M= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= @@ -705,7 +695,6 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= @@ -905,7 +894,6 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= @@ -1009,7 +997,6 @@ k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ= k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= diff --git a/hack/init.sh b/hack/init.sh index 4c9474716..e91099e54 100755 --- a/hack/init.sh +++ b/hack/init.sh @@ -7,6 +7,5 @@ set -o pipefail CRD_FILES="./vendor/github.com/stolostron/cluster-lifecycle-api/action/v1beta1/action.open-cluster-management.io_managedclusteractions.crd.yaml ./vendor/github.com/stolostron/cluster-lifecycle-api/clusterinfo/v1beta1/internal.open-cluster-management.io_managedclusterinfos.crd.yaml ./vendor/github.com/stolostron/cluster-lifecycle-api/imageregistry/v1alpha1/imageregistry.open-cluster-management.io_managedclusterimageregistries.crd.yaml -./vendor/github.com/stolostron/cluster-lifecycle-api/inventory/v1alpha1/inventory.open-cluster-management.io_baremetalassets.crd.yaml 
 ./vendor/github.com/stolostron/cluster-lifecycle-api/view/v1beta1/view.open-cluster-management.io_managedclusterviews.crd.yaml
 "
diff --git a/pkg/controllers/inventory/deleting_clusterdeployment_controller.go b/pkg/controllers/inventory/deleting_clusterdeployment_controller.go
deleted file mode 100644
index 34e602b02..000000000
--- a/pkg/controllers/inventory/deleting_clusterdeployment_controller.go
+++ /dev/null
@@ -1,158 +0,0 @@
-package inventory
-
-import (
-	"context"
-
-	hivev1 "github.com/openshift/hive/apis/hive/v1"
-	inventoryv1alpha1 "github.com/stolostron/cluster-lifecycle-api/inventory/v1alpha1"
-	"github.com/stolostron/multicloud-operators-foundation/pkg/utils"
-	"k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/klog"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/controller"
-	"sigs.k8s.io/controller-runtime/pkg/handler"
-	"sigs.k8s.io/controller-runtime/pkg/manager"
-	"sigs.k8s.io/controller-runtime/pkg/reconcile"
-	"sigs.k8s.io/controller-runtime/pkg/source"
-)
-
-// BareMetalAssetFinalizer is the finalizer used on BareMetalAsset resource
-const BareMetalAssetFinalizer = "baremetalasset.inventory.open-cluster-management.io"
-
-// newCDReconciler returns a new reconcile.Reconciler
-func newCDReconciler(mgr manager.Manager) reconcile.Reconciler {
-	return &ReconcileClusterDeployment{client: mgr.GetClient(), scheme: mgr.GetScheme()}
-}
-
-func addCDReconciler(mgr manager.Manager, r reconcile.Reconciler) error {
-	// Create a new controller
-	c, err := controller.New("baremetalasset-controller", mgr, controller.Options{Reconciler: r})
-	if err != nil {
-		return err
-	}
-
-	// Watch for changes to primary resource ClusterDeployment
-	err = c.Watch(&source.Kind{Type: &hivev1.ClusterDeployment{}}, &handler.EnqueueRequestForObject{})
-	if err != nil {
-		return err
-	}
-
-	err = c.Watch(
-		&source.Kind{Type: &inventoryv1alpha1.BareMetalAsset{}},
-		handler.EnqueueRequestsFromMapFunc(
-			handler.MapFunc(func(a client.Object) []reconcile.Request {
-				bma, ok := a.(*inventoryv1alpha1.BareMetalAsset)
-				if !ok {
-					// not a BareMetalAsset, returning empty
-					klog.Error("BMA handler received a non-BareMetalAsset object")
-					return []reconcile.Request{}
-				}
-				var requests []reconcile.Request
-				if bma.Spec.ClusterDeployment.Name != "" && bma.Spec.ClusterDeployment.Namespace != "" {
-					requests = append(requests, reconcile.Request{
-						NamespacedName: types.NamespacedName{
-							Name:      bma.Spec.ClusterDeployment.Name,
-							Namespace: bma.Spec.ClusterDeployment.Namespace,
-						},
-					})
-				}
-				return requests
-			}),
-		))
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// ReconcileClusterDeployment reconciles a ClusterDeployment object
-type ReconcileClusterDeployment struct {
-	// This client, initialized using mgr.GetClient() above, is a split client
-	// that reads objects from the cache and writes to the apiserver
-	client client.Client
-	scheme *runtime.Scheme
-}
-
-func (r *ReconcileClusterDeployment) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
-	klog.Info("Reconciling ClusterDeployment")
-	// Fetch the ClusterDeployment instance
-	instance := &hivev1.ClusterDeployment{}
-	err := r.client.Get(ctx, request.NamespacedName, instance)
-	if err != nil {
-		if errors.IsNotFound(err) {
-			// Request object not found, could have been deleted after reconcile request.
-			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
- // Return and don't requeue - return reconcile.Result{}, nil - } - // Error reading the object - requeue the request. - return reconcile.Result{}, err - } - - if !instance.GetDeletionTimestamp().IsZero() { - return reconcile.Result{}, r.removeClusterDeploymentFinalizer(ctx, instance) - } - - bmas := &inventoryv1alpha1.BareMetalAssetList{} - err = r.client.List(ctx, bmas, - client.MatchingLabels{ - ClusterDeploymentNameLabel: instance.Name, - ClusterDeploymentNamespaceLabel: instance.Namespace, - }) - if err != nil { - return reconcile.Result{}, err - } - - // only add finalizer when there is ref bma - if len(bmas.Items) == 0 { - return reconcile.Result{}, nil - } - - if !contains(instance.GetFinalizers(), BareMetalAssetFinalizer) { - klog.Info("Finalizer not found for BareMetalAsset. Adding finalizer") - instance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, BareMetalAssetFinalizer) - return reconcile.Result{}, r.client.Update(ctx, instance) - } - - return reconcile.Result{}, nil -} - -func (r *ReconcileClusterDeployment) removeClusterDeploymentFinalizer(ctx context.Context, instance *hivev1.ClusterDeployment) error { - if !contains(instance.GetFinalizers(), BareMetalAssetFinalizer) { - return nil - } - - bmas := &inventoryv1alpha1.BareMetalAssetList{} - err := r.client.List(ctx, bmas, - client.MatchingLabels{ - ClusterDeploymentNameLabel: instance.Name, - ClusterDeploymentNamespaceLabel: instance.Namespace, - }) - if err != nil { - return err - } - - errs := []error{} - for _, bma := range bmas.Items { - bmaCopy := bma.DeepCopy() - if len(bmaCopy.Spec.ClusterDeployment.Name) == 0 && len(bmaCopy.Spec.ClusterDeployment.Namespace) == 0 { - continue - } - bmaCopy.Spec.ClusterDeployment.Name = "" - bmaCopy.Spec.ClusterDeployment.Namespace = "" - err := r.client.Update(ctx, bmaCopy) - if err != nil { - errs = append(errs, err) - } - } - - if len(errs) > 0 { - return utils.NewMultiLineAggregate(errs) - } - - instance.ObjectMeta.Finalizers = remove(instance.ObjectMeta.Finalizers, BareMetalAssetFinalizer) - return r.client.Update(ctx, instance) -} diff --git a/pkg/controllers/inventory/deleting_clusterdeployment_controller_test.go b/pkg/controllers/inventory/deleting_clusterdeployment_controller_test.go deleted file mode 100644 index c6a3ecd3e..000000000 --- a/pkg/controllers/inventory/deleting_clusterdeployment_controller_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package inventory - -import ( - "context" - "testing" - - "k8s.io/apimachinery/pkg/api/errors" - - hivev1 "github.com/openshift/hive/apis/hive/v1" - inventoryv1alpha1 "github.com/stolostron/cluster-lifecycle-api/inventory/v1alpha1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func newTestCDReconciler(existingObjs []client.Object) (*ReconcileClusterDeployment, client.Client) { - fakeClient := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(existingObjs...).Build() - rbma := &ReconcileClusterDeployment{ - client: fakeClient, - scheme: scheme.Scheme, - } - return rbma, fakeClient -} - -func TestCDReconcile(t *testing.T) { - ctx := context.Background() - tests := []struct { - name string - existingObjs []client.Object - expectedErrorType error - expectedFinalizer []string - req reconcile.Request - requeue bool - }{ - { - name: "do not add finalizer", - existingObjs: []client.Object{ - 
newClusterDeployment(), - }, - expectedErrorType: nil, - req: reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: testName, - Namespace: testNamespace, - }, - }, - expectedFinalizer: []string{}, - }, - { - name: "add finalizer", - existingObjs: []client.Object{ - newClusterDeployment(), - func() *inventoryv1alpha1.BareMetalAsset { - bma := newBMAWithClusterDeployment() - bma.Labels = map[string]string{ - ClusterDeploymentNameLabel: testName, - ClusterDeploymentNamespaceLabel: testNamespace, - } - return bma - }(), - }, - expectedErrorType: nil, - req: reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: testName, - Namespace: testNamespace, - }, - }, - expectedFinalizer: []string{BareMetalAssetFinalizer}, - }, - { - name: "remove finalizer with no bma", - existingObjs: []client.Object{ - func() *hivev1.ClusterDeployment { - cd := newClusterDeployment() - now := metav1.Now() - cd.DeletionTimestamp = &now - return cd - }(), - }, - expectedErrorType: nil, - req: reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: testName, - Namespace: testNamespace, - }, - }, - expectedFinalizer: []string{}, - }, - { - name: "remove finalizer with bma", - existingObjs: []client.Object{ - func() *hivev1.ClusterDeployment { - cd := newClusterDeployment() - now := metav1.Now() - cd.DeletionTimestamp = &now - cd.Finalizers = []string{BareMetalAssetFinalizer} - return cd - }(), - func() *inventoryv1alpha1.BareMetalAsset { - bma := newBMAWithClusterDeployment() - bma.Labels = map[string]string{ - ClusterDeploymentNameLabel: testName, - ClusterDeploymentNamespaceLabel: testNamespace, - } - return bma - }(), - }, - expectedErrorType: nil, - req: reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: testName, - Namespace: testNamespace, - }, - }, - expectedFinalizer: []string{}, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - rbma, client := newTestCDReconciler(test.existingObjs) - _, err := rbma.Reconcile(ctx, test.req) - validateErrorAndStatusConditions(t, err, test.expectedErrorType, nil, nil) - cd := &hivev1.ClusterDeployment{} - err = client.Get(context.TODO(), types.NamespacedName{ - Name: testName, - Namespace: testNamespace, - }, cd) - if !errors.IsNotFound(err) { - validateErrorAndStatusConditions(t, err, nil, nil, nil) - if len(cd.Finalizers) != len(test.expectedFinalizer) { - t.Errorf("finalizer is not correct, actual %v, expected %v", cd.Finalizers, test.expectedFinalizer) - } - } - }) - } -} diff --git a/pkg/controllers/inventory/errors/errors_test.go b/pkg/controllers/inventory/errors/errors_test.go deleted file mode 100644 index 9def7b76a..000000000 --- a/pkg/controllers/inventory/errors/errors_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package errors - -import ( - "errors" - "testing" - - . 
"github.com/smartystreets/goconvey/convey" -) - -func Test_NoClusterSpecifiedError(t *testing.T) { - Convey("test NoClusterSpecifiedError", t, func() { - Convey("test NoClusterSpecifiedError case1", func() { - err := NewNoClusterError() - msg := err.Error() - So(msg, ShouldEqual, "No cluster specified") - }) - Convey("test NoClusterSpecifiedError case2", func() { - err := errors.New("no cluster specified") - rst := IsNoClusterError(err) - So(rst, ShouldBeFalse) - }) - }) -} - -func Test_AssetSecretNotFoundError(t *testing.T) { - Convey("test AssetSecretNotFoundError", t, func() { - Convey("test AssetSecretNotFoundError case1", func() { - err := NewAssetSecretNotFoundError("foo", "default") - msg := err.Error() - So(msg, ShouldEqual, "Secret foo not found in namespace default") - }) - Convey("test AssetSecretNotFoundError case2", func() { - err := errors.New("no cluster specified") - rst := IsAssetSecretNotFoundError(err) - So(rst, ShouldBeFalse) - }) - }) -} diff --git a/pkg/controllers/inventory/errors/erros.go b/pkg/controllers/inventory/errors/erros.go deleted file mode 100644 index 6e7d7a314..000000000 --- a/pkg/controllers/inventory/errors/erros.go +++ /dev/null @@ -1,53 +0,0 @@ -package errors - -import ( - "fmt" -) - -// NoClusterSpecifiedError is an error when no cluster is specified -type NoClusterSpecifiedError struct { - Message string -} - -// Error returns a string representation of the NoClusterSpecifiedError -func (e NoClusterSpecifiedError) Error() string { - return e.Message -} - -// NewNoClusterError returns a NoClusterSpecifiedError -func NewNoClusterError() error { - return &NoClusterSpecifiedError{ - Message: "No cluster specified", - } -} - -// IsNoClusterError returns true if the err is a NoClusterSpecifiedError -func IsNoClusterError(err error) bool { - _, ok := err.(*NoClusterSpecifiedError) - return ok -} - -// AssetSecretNotFoundError is an error when the asset's secret can not be found -type AssetSecretNotFoundError struct { - Name string - Namespace string -} - -// Error returns a string representation of the AssetSecretNotFoundError -func (e AssetSecretNotFoundError) Error() string { - return fmt.Sprintf("Secret %v not found in namespace %v", e.Name, e.Namespace) -} - -// NewAssetSecretNotFoundError returns a AssetSecretNotFoundError -func NewAssetSecretNotFoundError(name, namespace string) error { - return &AssetSecretNotFoundError{ - Name: name, - Namespace: namespace, - } -} - -// IsAssetSecretNotFoundError returns true if the err is a AssetSecretNotFoundError -func IsAssetSecretNotFoundError(err error) bool { - _, ok := err.(*AssetSecretNotFoundError) - return ok -} diff --git a/pkg/controllers/inventory/inventory_controller.go b/pkg/controllers/inventory/inventory_controller.go deleted file mode 100644 index b79b3eff3..000000000 --- a/pkg/controllers/inventory/inventory_controller.go +++ /dev/null @@ -1,856 +0,0 @@ -package inventory - -import ( - "context" - "encoding/json" - "fmt" - "reflect" - "strings" - "time" - - "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/client-go/util/retry" - - metal3v1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1" - objectreferencesv1 "github.com/openshift/custom-resource-status/objectreferences/v1" - hivev1 "github.com/openshift/hive/apis/hive/v1" - hiveinternalv1alpha1 "github.com/openshift/hive/apis/hiveinternal/v1alpha1" - "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" - inventoryv1alpha1 "github.com/stolostron/cluster-lifecycle-api/inventory/v1alpha1" - bmaerrors 
"github.com/stolostron/multicloud-operators-foundation/pkg/controllers/inventory/errors" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/tools/reference" - "k8s.io/klog" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" - - k8slabels "github.com/stolostron/multicloud-operators-foundation/pkg/utils" -) - -const ( - // RoleLabel is the key name for the role label associated with the asset - RoleLabel = "metal3.io/role" - // ClusterDeploymentNameLabel is the key name for the name label associated with the asset's clusterDeployment - ClusterDeploymentNameLabel = "metal3.io/cluster-deployment-name" - // ClusterDeploymentNamespaceLabel is the key name for the namespace label associated with the asset's clusterDeployment - ClusterDeploymentNamespaceLabel = "metal3.io/cluster-deployment-namespace" - // BareMetalHostKind contains the value of kind BareMetalHost - BareMetalHostKind = "BareMetalHost" -) - -const ( - // assetSecretRequeueAfter specifies the amount of time, in seconds, before requeue - assetSecretRequeueAfter int = 60 -) - -func SetupWithManager(mgr manager.Manager) error { - if err := addBMAReconciler(mgr, newBMAReconciler(mgr)); err != nil { - klog.Errorf("Failed to create baremetalasset controller, %v", err) - return err - } - if err := addCDReconciler(mgr, newCDReconciler(mgr)); err != nil { - klog.Errorf("Failed to create baremetalasset controller, %v", err) - return err - } - return nil -} - -// newReconciler returns a new reconcile.Reconciler -func newBMAReconciler(mgr manager.Manager) reconcile.Reconciler { - return &ReconcileBareMetalAsset{client: mgr.GetClient(), scheme: mgr.GetScheme()} -} - -// add adds a new Controller to mgr with r as the reconcile.Reconciler -func addBMAReconciler(mgr manager.Manager, r reconcile.Reconciler) error { - // Create a new controller - c, err := controller.New("baremetalasset-controller", mgr, controller.Options{Reconciler: r}) - if err != nil { - return err - } - - // Watch for changes to primary resource BareMetalAsset - err = c.Watch(&source.Kind{Type: &inventoryv1alpha1.BareMetalAsset{}}, &handler.EnqueueRequestForObject{}) - if err != nil { - return err - } - - // Watch for changes to SyncSets and requeue BareMetalAssets with the name and matching cluster-deployment-namespace label - // (which is also the syncset namespace) - err = c.Watch( - &source.Kind{Type: &hivev1.SyncSet{}}, - handler.EnqueueRequestsFromMapFunc( - handler.MapFunc(func(a client.Object) []reconcile.Request { - syncSet, ok := a.(*hivev1.SyncSet) - if !ok { - // not a SyncSet, returning empty - klog.Error("SyncSet handler received non-SyncSet object") - return []reconcile.Request{} - } - bmas := &inventoryv1alpha1.BareMetalAssetList{} - err := mgr.GetClient().List(context.TODO(), bmas, - client.MatchingLabels{ - ClusterDeploymentNamespaceLabel: syncSet.Namespace, - }) - if err != nil { - klog.Errorf("Could not list BareMetalAsset %v with label %v=%v, %v", - syncSet.Name, ClusterDeploymentNamespaceLabel, syncSet.Namespace, err) - } - var requests []reconcile.Request - for _, bma := 
range bmas.Items { - if syncSet.Name == bma.Name { - requests = append(requests, reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: bma.Name, - Namespace: bma.Namespace, - }, - }) - } - } - return requests - }, - ), - ), - ) - if err != nil { - return err - } - - // Watch for changes to ClusterSync - err = c.Watch( - &source.Kind{Type: &hiveinternalv1alpha1.ClusterSync{}}, - handler.EnqueueRequestsFromMapFunc( - handler.MapFunc(func(a client.Object) []reconcile.Request { - clusterSync, ok := a.(*hiveinternalv1alpha1.ClusterSync) - if !ok { - // not a ClusterSync, returning empty - klog.Error("ClusterSync handler received non-ClusterSync object") - return []reconcile.Request{} - } - bmas := &inventoryv1alpha1.BareMetalAssetList{} - err := mgr.GetClient().List(context.TODO(), bmas, client.InNamespace(clusterSync.Namespace)) - if err != nil { - klog.Error("Could not list BareMetalAssets", err) - } - var requests []reconcile.Request - for _, bma := range bmas.Items { - if bma.Spec.ClusterDeployment.Name == clusterSync.Name { - requests = append(requests, reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: bma.Name, - Namespace: bma.Namespace, - }, - }) - } - } - return requests - }), - )) - if err != nil { - return err - } - - // Watch for changes to ClusterDeployments and requeue BareMetalAssets with labels set to - // ClusterDeployment's name (which is expected to be the clusterName) - err = c.Watch( - &source.Kind{Type: &hivev1.ClusterDeployment{}}, - handler.EnqueueRequestsFromMapFunc( - handler.MapFunc(func(a client.Object) []reconcile.Request { - clusterDeployment, ok := a.(*hivev1.ClusterDeployment) - if !ok { - // not a Deployment, returning empty - klog.Error("ClusterDeployment handler received non-ClusterDeployment object") - return []reconcile.Request{} - } - bmas := &inventoryv1alpha1.BareMetalAssetList{} - err := mgr.GetClient().List(context.TODO(), bmas, - client.MatchingLabels{ - ClusterDeploymentNameLabel: clusterDeployment.Name, - ClusterDeploymentNamespaceLabel: clusterDeployment.Namespace, - }) - if err != nil { - klog.Errorf("could not list BareMetalAssets with label %v=%v, %v", - ClusterDeploymentNameLabel, clusterDeployment.Name, err) - } - var requests []reconcile.Request - for _, bma := range bmas.Items { - requests = append(requests, reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: bma.Name, - Namespace: bma.Namespace, - }, - }) - } - return requests - }), - )) - - if err != nil { - return err - } - - return nil -} - -// blank assignment to verify that ReconcileBareMetalAsset implements reconcile.Reconciler -var _ reconcile.Reconciler = &ReconcileBareMetalAsset{} - -// ReconcileBareMetalAsset reconciles a BareMetalAsset object -type ReconcileBareMetalAsset struct { - // This client, initialized using mgr.Client() above, is a split client - // that reads objects from the cache and writes to the apiserver - client client.Client - scheme *runtime.Scheme -} - -// Reconcile reads that state of the cluster for a BareMetalAsset object and makes changes based on the state read -// and what is in the BareMetalAsset.Spec -// The Controller will requeue the Request to be processed again if the returned error is non-nil or -// Result.Requeue is true, otherwise upon completion it will remove the work from the queue. 
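-// The helper steps below run in order: ensureLabels, checkAssetSecret,
-// cleanupOldHiveSyncSet, checkClusterDeployment, and ensureHiveSyncSet; the
-// first error aborts the sequence and is reflected in the status conditions.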
-func (r *ReconcileBareMetalAsset) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - klog.Info("Reconciling BareMetalAsset") - - // Fetch the BareMetalAsset instance - instance := &inventoryv1alpha1.BareMetalAsset{} - err := r.client.Get(ctx, request.NamespacedName, instance) - if err != nil { - if errors.IsNotFound(err) { - // Request object not found, could have been deleted after reconcile request. - // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. - // Return and don't requeue - return reconcile.Result{}, nil - } - // Error reading the object - requeue the request. - return reconcile.Result{}, err - } - - // Check DeletionTimestamp to determine if object is under deletion - if instance.GetDeletionTimestamp().IsZero() { - if !contains(instance.GetFinalizers(), BareMetalAssetFinalizer) { - klog.Info("Finalizer not found for BareMetalAsset. Adding finalizer") - instance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, BareMetalAssetFinalizer) - if err := r.client.Update(ctx, instance); err != nil { - klog.Errorf("Failed to add finalizer to baremetalasset, %v", err) - return reconcile.Result{}, err - } - } - } else { - // The object is being deleted - if contains(instance.GetFinalizers(), BareMetalAssetFinalizer) { - return r.deleteSyncSet(ctx, instance) - } - return reconcile.Result{}, nil - } - - for _, f := range []func(context.Context, *inventoryv1alpha1.BareMetalAsset) error{ - r.ensureLabels, - r.checkAssetSecret, - r.cleanupOldHiveSyncSet, - r.checkClusterDeployment, - r.ensureHiveSyncSet, - } { - err = f(ctx, instance) - if err != nil { - switch { - case bmaerrors.IsNoClusterError(err): - klog.Info("No cluster specified") - return reconcile.Result{}, r.updateStatus(ctx, instance) - case bmaerrors.IsAssetSecretNotFoundError(err): - // since we won't be notified when the secret is created, requeue after some time - klog.Infof("Secret not found, RequeueAfter.Duration %v seconds", assetSecretRequeueAfter) - return reconcile.Result{RequeueAfter: time.Duration(assetSecretRequeueAfter) * time.Second}, - r.updateStatus(ctx, instance) - } - - klog.Errorf("Failed reconcile, %v", err) - if statusErr := r.updateStatus(ctx, instance); statusErr != nil { - klog.Errorf("Failed to update status, %v", statusErr) - } - - return reconcile.Result{}, err - } - } - - klog.Info("BareMetalAsset Reconciled") - return reconcile.Result{}, r.updateStatus(ctx, instance) -} - -func (r *ReconcileBareMetalAsset) updateStatus(ctx context.Context, instance *inventoryv1alpha1.BareMetalAsset) error { - err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - newInstance := &inventoryv1alpha1.BareMetalAsset{} - err := r.client.Get(ctx, types.NamespacedName{Name: instance.Name, Namespace: instance.Namespace}, newInstance) - if err != nil { - if errors.IsNotFound(err) { - return nil - } - return err - } - if equality.Semantic.DeepEqual(newInstance.Status, instance.Status) { - return nil - } - newInstance.Status = instance.Status - return r.client.Status().Update(ctx, newInstance) - }) - return err -} - -// checkAssetSecret verifies that we can find the secret listed in the BareMetalAsset -func (r *ReconcileBareMetalAsset) checkAssetSecret(ctx context.Context, instance *inventoryv1alpha1.BareMetalAsset) error { - secretName := instance.Spec.BMC.CredentialsName - - secret := &corev1.Secret{} - err := r.client.Get(ctx, types.NamespacedName{Name: secretName, Namespace: instance.Namespace}, secret) - if err != nil 
{ - if errors.IsNotFound(err) { - klog.Errorf("Secret (%s/%s) not found, %v", instance.Namespace, secretName, err) - meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{ - Type: inventoryv1alpha1.ConditionCredentialsFound, - Status: metav1.ConditionFalse, - Reason: inventoryv1alpha1.ConditionReasonSecretNotFound, - Message: err.Error(), - }) - return bmaerrors.NewAssetSecretNotFoundError(secretName, instance.Namespace) - } - return err - } - - // add secret reference to status - secretRef, err := reference.GetReference(r.scheme, secret) - if err != nil { - klog.Errorf("Failed to get reference from secret, %v", err) - return err - } - if err := objectreferencesv1.SetObjectReference(&instance.Status.RelatedObjects, *secretRef); err != nil { - klog.Errorf("Failed to set reference, %v", err) - return err - } - - // add condition to status - meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{ - Type: inventoryv1alpha1.ConditionCredentialsFound, - Status: metav1.ConditionTrue, - Reason: inventoryv1alpha1.ConditionReasonSecretFound, - Message: fmt.Sprintf("A secret with the name %v in namespace %v was found", secretName, instance.Namespace), - }) - - // Set BaremetalAsset instance as the owner and controller - modified := false - if secret.OwnerReferences == nil || len(secret.OwnerReferences) == 0 { - modified = true - if err := controllerutil.SetControllerReference(instance, secret, r.scheme); err != nil { - klog.Errorf("Failed to set ControllerReference, %v", err) - return err - } - } - - secretTypeLabel := map[string]string{ - "cluster.open-cluster-management.io/backup": "", - } - - resourcemerge.MergeMap(&modified, &secret.Labels, secretTypeLabel) - - if modified { - return r.client.Update(ctx, secret) - } - return nil -} - -func (r *ReconcileBareMetalAsset) ensureLabels(ctx context.Context, instance *inventoryv1alpha1.BareMetalAsset) error { - labels := k8slabels.CloneAndAddLabel(instance.Labels, ClusterDeploymentNameLabel, instance.Spec.ClusterDeployment.Name) - labels = k8slabels.AddLabel(labels, ClusterDeploymentNamespaceLabel, instance.Spec.ClusterDeployment.Namespace) - labels = k8slabels.AddLabel(labels, RoleLabel, string(instance.Spec.Role)) - - if !reflect.DeepEqual(labels, instance.Labels) { - instance.Labels = labels - return r.client.Update(ctx, instance) - } - return nil -} - -// checkClusterDeployment verifies that we can find the ClusterDeployment specified in the BareMetalAsset -func (r *ReconcileBareMetalAsset) checkClusterDeployment(ctx context.Context, instance *inventoryv1alpha1.BareMetalAsset) error { - clusterDeploymentName := instance.Spec.ClusterDeployment.Name - clusterDeploymentNamespace := instance.Spec.ClusterDeployment.Namespace - - // if the clusterDeploymentName is not specified, we need to handle the possibility - // that it has been removed from the spec - if clusterDeploymentName == "" { - meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{ - Type: inventoryv1alpha1.ConditionClusterDeploymentFound, - Status: metav1.ConditionFalse, - Reason: inventoryv1alpha1.ConditionReasonNoneSpecified, - Message: "No cluster deployment specified", - }) - meta.RemoveStatusCondition(&instance.Status.Conditions, inventoryv1alpha1.ConditionAssetSyncStarted) - meta.RemoveStatusCondition(&instance.Status.Conditions, inventoryv1alpha1.ConditionAssetSyncCompleted) - - return bmaerrors.NewNoClusterError() - } - - // If a clusterDeployment is specified, we need to find it - cd := &hivev1.ClusterDeployment{} - err := 
r.client.Get( - ctx, types.NamespacedName{Name: clusterDeploymentName, Namespace: clusterDeploymentNamespace}, cd) - if err != nil { - if errors.IsNotFound(err) { - klog.Errorf("ClusterDeployment (%s/%s) not found, %v", clusterDeploymentNamespace, clusterDeploymentName, err) - meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{ - Type: inventoryv1alpha1.ConditionClusterDeploymentFound, - Status: metav1.ConditionFalse, - Reason: inventoryv1alpha1.ConditionReasonClusterDeploymentNotFound, - Message: err.Error(), - }) - return err - } - return err - } - - // add condition - meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{ - Type: inventoryv1alpha1.ConditionClusterDeploymentFound, - Status: metav1.ConditionTrue, - Reason: inventoryv1alpha1.ConditionReasonClusterDeploymentFound, - Message: fmt.Sprintf("A ClusterDeployment with the name %v in namespace %v was found", cd.Name, cd.Namespace), - }) - - return nil -} - -func (r *ReconcileBareMetalAsset) ensureHiveSyncSet(ctx context.Context, instance *inventoryv1alpha1.BareMetalAsset) error { - assetSyncCompleted := r.checkHiveClusterSync(ctx, instance) - hsc := r.newHiveSyncSet(instance, assetSyncCompleted) - found := &hivev1.SyncSet{} - err := r.client.Get(ctx, types.NamespacedName{Name: hsc.Name, Namespace: hsc.Namespace}, found) - if err != nil { - if errors.IsNotFound(err) { - err := r.client.Create(ctx, hsc) - if err != nil { - klog.Errorf("Failed to create Hive SyncSet, %v", err) - meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{ - Type: inventoryv1alpha1.ConditionAssetSyncStarted, - Status: metav1.ConditionFalse, - Reason: inventoryv1alpha1.ConditionReasonSyncSetCreationFailed, - Message: "Failed to create SyncSet", - }) - return err - } - - meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{ - Type: inventoryv1alpha1.ConditionAssetSyncStarted, - Status: metav1.ConditionTrue, - Reason: inventoryv1alpha1.ConditionReasonSyncSetCreated, - Message: "SyncSet created successfully", - }) - return nil - } - // other error. 
fail the reconcile
-		meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
-			Type:    inventoryv1alpha1.ConditionAssetSyncStarted,
-			Status:  metav1.ConditionFalse,
-			Reason:  inventoryv1alpha1.ConditionReasonSyncSetGetFailed,
-			Message: "Failed to get SyncSet",
-		})
-		klog.Errorf("Failed to get Hive SyncSet (%s/%s), %v", hsc.Namespace, hsc.Name, err)
-		return err
-	}
-	// rebuild the expected SyncSet if the one we found is missing Resources,
-	// because that means the resources have already been applied successfully
-	if len(found.Spec.SyncSetCommonSpec.Resources) == 0 {
-		hsc = r.newHiveSyncSet(instance, true)
-	}
-
-	// Add SyncSet to related objects
-	hscRef, err := reference.GetReference(r.scheme, found)
-	if err != nil {
-		klog.Errorf("Failed to get reference from SyncSet, %v", err)
-		return err
-	}
-	if err := objectreferencesv1.SetObjectReference(&instance.Status.RelatedObjects, *hscRef); err != nil {
-		klog.Errorf("Failed to set reference, %v", err)
-		return err
-	}
-
-	// Apply the labels to a copy of the found labels for comparison, to minimize spurious updates
-	labels := k8slabels.CloneAndAddLabel(found.Labels, ClusterDeploymentNameLabel, instance.Spec.ClusterDeployment.Name)
-	labels = k8slabels.AddLabel(labels, ClusterDeploymentNamespaceLabel, instance.Spec.ClusterDeployment.Namespace)
-	labels = k8slabels.AddLabel(labels, RoleLabel, string(instance.Spec.Role))
-
-	// Update Hive SyncSet CR if it is not in the desired state
-	if !reflect.DeepEqual(hsc.Spec, found.Spec) || !reflect.DeepEqual(labels, found.Labels) {
-		klog.Infof("Updating Hive SyncSet (%s/%s)", hsc.Namespace, hsc.Name)
-
-		found.Labels = labels
-		found.Spec = hsc.Spec
-
-		err := r.client.Update(ctx, found)
-		if err != nil {
-			klog.Errorf("Failed to update Hive SyncSet (%s/%s), %v", hsc.Namespace, hsc.Name, err)
-			meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
-				Type:    inventoryv1alpha1.ConditionAssetSyncStarted,
-				Status:  metav1.ConditionFalse,
-				Reason:  inventoryv1alpha1.ConditionReasonSyncSetUpdateFailed,
-				Message: "Failed to update SyncSet",
-			})
-			return err
-		}
-		meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
-			Type:    inventoryv1alpha1.ConditionAssetSyncStarted,
-			Status:  metav1.ConditionTrue,
-			Reason:  inventoryv1alpha1.ConditionReasonSyncSetUpdated,
-			Message: "SyncSet updated successfully",
-		})
-	}
-	return nil
-}
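Note (annotation, not part of the deleted file): ensureHiveSyncSet above hand-rolls the get/create/compare/update sequence. For comparison, controller-runtime's controllerutil.CreateOrUpdate wraps the same branching; a hedged sketch assuming the same hive API types (applySyncSet is a hypothetical name, not the controller's actual code path):

package syncsetapply

import (
	"context"

	hivev1 "github.com/openshift/hive/apis/hive/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// applySyncSet creates the SyncSet when absent and otherwise patches labels
// and spec in place; CreateOrUpdate issues an Update only when something changed.
func applySyncSet(ctx context.Context, c client.Client, desired *hivev1.SyncSet) (controllerutil.OperationResult, error) {
	existing := &hivev1.SyncSet{
		ObjectMeta: metav1.ObjectMeta{Name: desired.Name, Namespace: desired.Namespace},
	}
	return controllerutil.CreateOrUpdate(ctx, c, existing, func() error {
		// The mutate callback sets only the desired fields.
		existing.Labels = desired.Labels
		existing.Spec = desired.Spec
		return nil
	})
}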
-func (r *ReconcileBareMetalAsset) newHiveSyncSet(instance *inventoryv1alpha1.BareMetalAsset, assetSyncCompleted bool) *hivev1.SyncSet {
-	bmhJSON, err := newBareMetalHost(instance, assetSyncCompleted)
-	if err != nil {
-		klog.Errorf("Error marshaling baremetalhost, %v", err)
-		return nil
-	}
-
-	hsc := &hivev1.SyncSet{
-		TypeMeta: metav1.TypeMeta{
-			Kind:       "SyncSet",
-			APIVersion: "hive.openshift.io/v1",
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      instance.Name,
-			Namespace: instance.Spec.ClusterDeployment.Namespace, // syncset should be created in the same namespace as the clusterdeployment
-			Labels: map[string]string{
-				ClusterDeploymentNameLabel:      instance.Spec.ClusterDeployment.Name,
-				ClusterDeploymentNamespaceLabel: instance.Spec.ClusterDeployment.Namespace,
-				RoleLabel:                       string(instance.Spec.Role),
-			},
-		},
-		Spec: hivev1.SyncSetSpec{
-			SyncSetCommonSpec: hivev1.SyncSetCommonSpec{
-				Resources: []runtime.RawExtension{
-					{
-						Raw: bmhJSON,
-					},
-				},
-				Patches:           []hivev1.SyncObjectPatch{},
-				ResourceApplyMode: hivev1.SyncResourceApplyMode,
-				Secrets: []hivev1.SecretMapping{
-					{
-						SourceRef: hivev1.SecretReference{
-							Name:      instance.Spec.BMC.CredentialsName,
-							Namespace: instance.Namespace,
-						},
-						TargetRef: hivev1.SecretReference{
-							Name:      instance.Spec.BMC.CredentialsName,
-							Namespace: inventoryv1alpha1.ManagedClusterResourceNamespace,
-						},
-					},
-				},
-			},
-			ClusterDeploymentRefs: []corev1.LocalObjectReference{
-				{
-					Name: instance.Spec.ClusterDeployment.Name,
-				},
-			},
-		},
-	}
-
-	if assetSyncCompleted {
-		// Switch to Upsert mode so that removing the BareMetalHost from the
-		// resource list below does not delete it from the managed cluster
-		hsc.Spec.SyncSetCommonSpec.ResourceApplyMode = hivev1.UpsertResourceApplyMode
-		// Remove the BareMetalHost from the list of resources to sync
-		hsc.Spec.SyncSetCommonSpec.Resources = []runtime.RawExtension{}
-		// Specify the BareMetalHost as a patch
-		hsc.Spec.SyncSetCommonSpec.Patches = []hivev1.SyncObjectPatch{
-			{
-				APIVersion: metal3v1alpha1.GroupVersion.String(),
-				Kind:       BareMetalHostKind,
-				Name:       instance.Name,
-				Namespace:  inventoryv1alpha1.ManagedClusterResourceNamespace,
-				Patch:      string(bmhJSON),
-				PatchType:  "merge",
-			},
-		}
-	}
-	return hsc
-}
-
-func newBareMetalHost(instance *inventoryv1alpha1.BareMetalAsset, assetSyncCompleted bool) ([]byte, error) {
-	bmhSpec := map[string]interface{}{
-		"bmc": map[string]string{
-			"address":         instance.Spec.BMC.Address,
-			"credentialsName": instance.Spec.BMC.CredentialsName,
-		},
-		"hardwareProfile": instance.Spec.HardwareProfile,
-		"bootMACAddress":  instance.Spec.BootMACAddress,
-	}
-	if !assetSyncCompleted {
-		bmhSpec["online"] = true
-	}
-
-	bmhJSON, err := json.Marshal(map[string]interface{}{
-		"kind":       BareMetalHostKind,
-		"apiVersion": metal3v1alpha1.GroupVersion.String(),
-		"metadata": map[string]interface{}{
-			"name":      instance.Name,
-			"namespace": inventoryv1alpha1.ManagedClusterResourceNamespace,
-			"labels": map[string]string{
-				ClusterDeploymentNameLabel:      instance.Spec.ClusterDeployment.Name,
-				ClusterDeploymentNamespaceLabel: instance.Spec.ClusterDeployment.Namespace,
-				RoleLabel:                       string(instance.Spec.Role),
-			},
-		},
-		"spec": bmhSpec,
-	})
-	if err != nil {
-		return []byte{}, err
-	}
-
-	return bmhJSON, nil
-}
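Note (annotation, not part of the deleted file): newBareMetalHost above serializes the BareMetalHost manifest from nested maps, and newHiveSyncSet embeds the bytes in a runtime.RawExtension. A small sketch of one way such a payload could be sanity-checked in a test; decodePayload and the package name are hypothetical, while unstructured is the standard apimachinery helper:

package bmhpayload

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// decodePayload confirms the marshaled manifest is a well-formed object:
// it must decode into unstructured.Unstructured and carry a kind and name.
func decodePayload(raw []byte) (*unstructured.Unstructured, error) {
	u := &unstructured.Unstructured{}
	if err := json.Unmarshal(raw, u); err != nil {
		return nil, err
	}
	if u.GetKind() == "" || u.GetName() == "" {
		return nil, fmt.Errorf("payload is missing kind or name")
	}
	return u, nil
}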
-func (r *ReconcileBareMetalAsset) checkHiveClusterSync(ctx context.Context, instance *inventoryv1alpha1.BareMetalAsset) bool {
-	// get related syncSet
-	syncSetNsN := types.NamespacedName{
-		Name:      instance.Name,
-		Namespace: instance.Spec.ClusterDeployment.Namespace,
-	}
-	foundSyncSet := &hivev1.SyncSet{}
-	err := r.client.Get(ctx, syncSetNsN, foundSyncSet)
-	if err != nil {
-		meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
-			Type:   inventoryv1alpha1.ConditionAssetSyncCompleted,
-			Status: metav1.ConditionFalse,
-			Reason: inventoryv1alpha1.ConditionReasonSyncStatusNotFound,
-			Message: fmt.Sprintf("Problem getting Hive SyncSet for Name %s in Namespace %s, %v",
-				syncSetNsN.Name, syncSetNsN.Namespace, err),
-		})
-		return false
-	}
-
-	// get related clusterSync
-	clusterSyncNsN := types.NamespacedName{
-		Name:      instance.Spec.ClusterDeployment.Name,
-		Namespace: instance.Spec.ClusterDeployment.Namespace,
-	}
-
-	foundClusterSync := &hiveinternalv1alpha1.ClusterSync{}
-	if err = r.client.Get(ctx, clusterSyncNsN, foundClusterSync); err != nil {
-		meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
-			Type:   inventoryv1alpha1.ConditionAssetSyncCompleted,
-			Status: metav1.ConditionFalse,
-			Reason: inventoryv1alpha1.ConditionReasonSyncStatusNotFound,
-			Message: fmt.Sprintf("Problem getting Hive ClusterSync for ClusterDeployment.Name %s in Namespace %s, %v",
-				clusterSyncNsN.Name, clusterSyncNsN.Namespace, err),
-		})
-		return false
-	}
-
-	// locate the matching syncStatus
-	foundSyncStatuses := []hiveinternalv1alpha1.SyncStatus{}
-	for _, syncStatus := range foundClusterSync.Status.SyncSets {
-		if syncStatus.Name == instance.Name {
-			foundSyncStatuses = append(foundSyncStatuses, syncStatus)
-		}
-	}
-
-	if len(foundSyncStatuses) != 1 {
-		err = fmt.Errorf("unable to find SyncStatus with Name %v in ClusterSyncs %v", instance.Name, clusterSyncNsN.Name)
-		meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
-			Type:    inventoryv1alpha1.ConditionAssetSyncCompleted,
-			Status:  metav1.ConditionFalse,
-			Reason:  inventoryv1alpha1.ConditionReasonSyncStatusNotFound,
-			Message: err.Error(),
-		})
-		return false
-	}
-
-	foundSyncStatus := foundSyncStatuses[0]
-	if foundSyncStatus.ObservedGeneration != foundSyncSet.Generation {
-		klog.Error("SyncStatus.ObservedGeneration does not match SyncSet.Generation")
-		meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
-			Type:    inventoryv1alpha1.ConditionAssetSyncStarted,
-			Status:  metav1.ConditionFalse,
-			Reason:  inventoryv1alpha1.ConditionReasonSyncSetNotApplied,
-			Message: "SyncSet has not been applied yet",
-		})
-		return false
-	}
-
-	return r.checkHiveSyncStatus(ctx, instance, foundSyncSet, foundSyncStatus)
-}
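Note (annotation, not part of the deleted file): checkHiveClusterSync above scans ClusterSync.Status.SyncSets for exactly one entry whose Name matches the asset. The lookup can be isolated into a helper like the following sketch; findSyncStatus is a hypothetical name, while SyncStatus is the hive type used above:

package synclookup

import (
	hiveinternalv1alpha1 "github.com/openshift/hive/apis/hiveinternal/v1alpha1"
)

// findSyncStatus mirrors the len(foundSyncStatuses) != 1 guard above: it
// reports success only when exactly one SyncStatus carries the given name.
func findSyncStatus(statuses []hiveinternalv1alpha1.SyncStatus, name string) (hiveinternalv1alpha1.SyncStatus, bool) {
	var matches []hiveinternalv1alpha1.SyncStatus
	for _, s := range statuses {
		if s.Name == name {
			matches = append(matches, s)
		}
	}
	if len(matches) != 1 {
		return hiveinternalv1alpha1.SyncStatus{}, false
	}
	return matches[0], true
}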
-func (r *ReconcileBareMetalAsset) checkHiveSyncStatus(ctx context.Context,
-	instance *inventoryv1alpha1.BareMetalAsset,
-	syncSet *hivev1.SyncSet,
-	syncSetStatus hiveinternalv1alpha1.SyncStatus,
-) bool {
-	resourceCount := len(syncSet.Spec.Resources)
-	patchCount := len(syncSet.Spec.Patches)
-
-	if resourceCount == 1 {
-		if syncSetStatus.Result == hiveinternalv1alpha1.SuccessSyncSetResult {
-			meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
-				Type:    inventoryv1alpha1.ConditionAssetSyncCompleted,
-				Status:  metav1.ConditionTrue,
-				Reason:  inventoryv1alpha1.ConditionReasonSyncSetAppliedSuccessful,
-				Message: "Successfully applied SyncSet",
-			})
-			return true
-		}
-
-		meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
-			Type:    inventoryv1alpha1.ConditionAssetSyncCompleted,
-			Status:  metav1.ConditionFalse,
-			Reason:  inventoryv1alpha1.ConditionReasonSyncSetAppliedFailed,
-			Message: fmt.Sprintf("Failed to apply SyncSet with err %s", syncSetStatus.FailureMessage),
-		})
-		return false
-	}
-
-	if patchCount == 1 {
-		if syncSetStatus.Result == hiveinternalv1alpha1.SuccessSyncSetResult {
-			meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
-				Type:    inventoryv1alpha1.ConditionAssetSyncCompleted,
-				Status:  metav1.ConditionTrue,
-				Reason:  inventoryv1alpha1.ConditionReasonSyncSetAppliedSuccessful,
-				Message: "Successfully applied SyncSet",
-			})
-			return true
-		}
-
-		meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
-			Type:    inventoryv1alpha1.ConditionAssetSyncCompleted,
-			Status:  metav1.ConditionFalse,
-			Reason:  inventoryv1alpha1.ConditionReasonSyncSetAppliedFailed,
-			Message: fmt.Sprintf("Failed to apply SyncSet with err %s", syncSetStatus.FailureMessage),
-		})
-
-		if strings.Contains(syncSetStatus.FailureMessage, "not found") {
-			if err := r.client.Delete(ctx, syncSet); err != nil {
-				klog.Errorf("Failed to delete SyncSet %v, %v", instance.Name, err)
-			}
-		}
-		return false
-	}
-
-	err := fmt.Errorf(
-		"unexpected number of resources found on SyncSet. Expected (1) Found (%v)",
-		resourceCount,
-	)
-
-	meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
-		Type:    inventoryv1alpha1.ConditionAssetSyncCompleted,
-		Status:  metav1.ConditionFalse,
-		Reason:  inventoryv1alpha1.ConditionReasonUnexpectedResourceCount,
-		Message: err.Error(),
-	})
-
-	return false
-}
-
-func (r *ReconcileBareMetalAsset) deleteSyncSet(ctx context.Context, instance *inventoryv1alpha1.BareMetalAsset) (reconcile.Result, error) {
-	if instance.Spec.ClusterDeployment.Namespace == "" && instance.Spec.ClusterDeployment.Name == "" {
-		instance.ObjectMeta.Finalizers = remove(instance.ObjectMeta.Finalizers, BareMetalAssetFinalizer)
-		return reconcile.Result{}, r.client.Update(ctx, instance)
-	}
-
-	syncSet := r.newHiveSyncSet(instance, false)
-	foundSyncSet := &hivev1.SyncSet{}
-	err := r.client.Get(ctx, types.NamespacedName{Name: syncSet.Name, Namespace: syncSet.Namespace}, foundSyncSet)
-	if err != nil {
-		if errors.IsNotFound(err) {
-			instance.ObjectMeta.Finalizers = remove(instance.ObjectMeta.Finalizers, BareMetalAssetFinalizer)
-			return reconcile.Result{}, r.client.Update(ctx, instance)
-		}
-		klog.Errorf("Failed to get Hive SyncSet (%s/%s) in cleanup, %v", syncSet.Namespace, syncSet.Name, err)
-		return reconcile.Result{}, err
-	}
-
-	// Only update the SyncSet if the BareMetalHost is not defined in the
-	// Resources section
-	if len(foundSyncSet.Spec.SyncSetCommonSpec.Resources) == 0 {
-		foundSyncSet.Spec = syncSet.Spec
-		return reconcile.Result{}, r.client.Update(ctx, foundSyncSet)
-	}
-
-	// Don't delete the SyncSet until the ClusterSync is applied
-	if r.checkHiveClusterSync(ctx, instance) {
-		return reconcile.Result{}, r.client.Delete(ctx, syncSet)
-	}
-	return reconcile.Result{}, nil
-}
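Note (annotation, not part of the deleted file): deleteSyncSet above removes the finalizer with the hand-written contains/remove helpers defined below. Current controller-runtime ships equivalents; a hedged sketch of the same deletion guard written with them (handleDeletion and the cleanup callback are hypothetical, the finalizer string matches the one used in this repository's e2e test):

package finalizerflow

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

const assetFinalizer = "baremetalasset.inventory.open-cluster-management.io"

// handleDeletion runs cleanup once the object is marked for deletion, then
// strips the finalizer so the API server can complete the delete.
func handleDeletion(ctx context.Context, c client.Client, obj client.Object, cleanup func(context.Context) error) error {
	if obj.GetDeletionTimestamp().IsZero() || !controllerutil.ContainsFinalizer(obj, assetFinalizer) {
		return nil
	}
	if err := cleanup(ctx); err != nil {
		return err
	}
	controllerutil.RemoveFinalizer(obj, assetFinalizer)
	return c.Update(ctx, obj)
}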
-func (r *ReconcileBareMetalAsset) cleanupOldHiveSyncSet(ctx context.Context, instance *inventoryv1alpha1.BareMetalAsset) error {
-	// If clusterDeployment.Namespace is updated to a new namespace or removed from the spec, we need to
-	// ensure that existing syncset, if any, is deleted from the old namespace.
-	// We can get the old syncset from status.RelatedObjects if it exists.
-	hscRef := corev1.ObjectReference{}
-	for _, ro := range instance.Status.RelatedObjects {
-		if ro.Name == instance.Name &&
-			ro.Kind == "SyncSet" &&
-			ro.APIVersion == hivev1.SchemeGroupVersion.String() &&
-			ro.Namespace != instance.Spec.ClusterDeployment.Namespace {
-			hscRef = ro
-			break
-		}
-	}
-	if hscRef == (corev1.ObjectReference{}) {
-		// Nothing to do if no such syncset was found
-		return nil
-	}
-
-	// Delete syncset in old namespace
-	klog.Infof("Cleaning up Hive SyncSet in old namespace (%s/%s)", hscRef.Namespace, hscRef.Name)
-	err := r.client.Delete(ctx, &hivev1.SyncSet{
-		ObjectMeta: metav1.ObjectMeta{
-			Namespace: hscRef.Namespace,
-			Name:      hscRef.Name,
-		},
-	})
-
-	if err != nil {
-		if !errors.IsNotFound(err) {
-			klog.Errorf("Failed to delete Hive SyncSet (%s/%s), %v", hscRef.Namespace, hscRef.Name, err)
-			return err
-		}
-	}
-
-	// Remove SyncSet from related objects
-	err = objectreferencesv1.RemoveObjectReference(&instance.Status.RelatedObjects, hscRef)
-	if err != nil {
-		klog.Errorf("Failed to remove reference from status.RelatedObjects, %v", err)
-		return err
-	}
-
-	return nil
-}
-
-// Checks whether a string is contained within a slice
-func contains(slice []string, s string) bool {
-	for _, item := range slice {
-		if item == s {
-			return true
-		}
-	}
-	return false
-}
-
-// Removes a given string from a slice and returns the new slice
-func remove(slice []string, s string) (result []string) {
-	for _, item := range slice {
-		if item == s {
-			continue
-		}
-		result = append(result, item)
-	}
-	return
-}
diff --git a/pkg/controllers/inventory/inventory_controller_test.go b/pkg/controllers/inventory/inventory_controller_test.go
deleted file mode 100644
index b9ee75970..000000000
--- a/pkg/controllers/inventory/inventory_controller_test.go
+++ /dev/null
@@ -1,540 +0,0 @@
-package inventory
-
-import (
-	"context"
-	"fmt"
-	"os"
-	"testing"
-	"time"
-
-	hivev1 "github.com/openshift/hive/apis/hive/v1"
-	hiveinternalv1alpha1 "github.com/openshift/hive/apis/hiveinternal/v1alpha1"
-	inventoryv1alpha1 "github.com/stolostron/cluster-lifecycle-api/inventory/v1alpha1"
-	bmaerrors "github.com/stolostron/multicloud-operators-foundation/pkg/controllers/inventory/errors"
-	"github.com/stretchr/testify/assert"
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/meta"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/client-go/kubernetes/scheme"
-	"k8s.io/klog"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/client/fake"
-	"sigs.k8s.io/controller-runtime/pkg/reconcile"
-)
-
-const (
-	testName      = "foo"
-	testNamespace = "bar"
-	testBMHKind   = "BareMetalHost"
-	testSSKind    = "SyncSet"
-	testRoleLabel = "metal3.io/role"
-)
-
-var _ reconcile.Reconciler = &ReconcileBareMetalAsset{}
-
-func TestMain(m *testing.M) {
-	// AddToSchemes may be used to add all resources defined in the project to a Scheme
-	var AddToSchemes runtime.SchemeBuilder
-	// Register the types with the Scheme so the components can map objects to GroupVersionKinds and back
-	AddToSchemes = append(AddToSchemes, inventoryv1alpha1.AddToScheme, hiveinternalv1alpha1.AddToScheme)
-
-	if err := AddToSchemes.AddToScheme(scheme.Scheme); err != nil {
-		klog.Errorf("Failed adding apis to scheme, %v", err)
-		os.Exit(1)
-	}
-
-	if err := hivev1.AddToScheme(scheme.Scheme); err != nil {
-		klog.Errorf("Failed adding hivev1 to scheme, %v", err)
-		os.Exit(1)
-	}
-	exitVal := m.Run()
-	os.Exit(exitVal)
-}
-
-func 
TestReconcile(t *testing.T) { - ctx := context.Background() - tests := []struct { - name string - existingObjs []client.Object - expectedErrorType error - req reconcile.Request - requeue bool - }{ - { - name: "BareMetalAssetNotFound", - existingObjs: []client.Object{}, - req: reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: testName, - Namespace: testNamespace, - }, - }, - }, - { - name: "BareMetalAssetFound", - existingObjs: []client.Object{newBMA()}, - req: reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: testName, - Namespace: testNamespace, - }, - }, - requeue: true, - }, - { - name: "All found", - existingObjs: []client.Object{ - newBMAWithClusterDeployment(), - newSecret(), - newClusterDeployment(), - newSyncSet(), - }, - req: reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: testName, - Namespace: testNamespace, - }, - }, - }, - { - name: "ClusterDeploymentsNotFound", - existingObjs: []client.Object{ - newBMAWithClusterDeployment(), - newSecret(), - }, - expectedErrorType: fmt.Errorf("clusterdeployments.hive.openshift.io \"%s\" not found", testName), - req: reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: testName, - Namespace: testNamespace, - }, - }, - }, - { - name: "BareMetalAssetWithDeletionTimestampAndFinalizer", - existingObjs: []client.Object{ - func() *inventoryv1alpha1.BareMetalAsset { - bma := newBMA() - bma.SetDeletionTimestamp(&metav1.Time{Time: time.Now()}) - bma.SetFinalizers([]string{BareMetalAssetFinalizer}) - return bma - }(), - }, - req: reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: testName, - Namespace: testNamespace, - }, - }, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - rbma := newTestReconciler(test.existingObjs) - res, err := rbma.Reconcile(ctx, test.req) - validateErrorAndStatusConditions(t, err, test.expectedErrorType, nil, nil) - - if test.requeue { - assert.Equal(t, res, reconcile.Result{Requeue: false, RequeueAfter: time.Duration(60) * time.Second}) - } else { - assert.Equal(t, res, reconcile.Result{Requeue: false, RequeueAfter: 0}) - } - }) - } -} - -func TestCheckAssetSecret(t *testing.T) { - ctx := context.Background() - tests := []struct { - name string - existingObjs []client.Object - expectedErrorType error - expectedConditions []metav1.Condition - bma *inventoryv1alpha1.BareMetalAsset - validateSecret func(*testing.T, client.Client) - }{ - { - name: "SecretNotFound", - existingObjs: []client.Object{}, - expectedErrorType: bmaerrors.NewAssetSecretNotFoundError(testName, testNamespace), - expectedConditions: []metav1.Condition{{ - Type: inventoryv1alpha1.ConditionCredentialsFound, - Status: metav1.ConditionFalse, - }}, - bma: newBMA(), - validateSecret: func(t *testing.T, c client.Client) {}, - }, - { - name: "SecretFound", - existingObjs: []client.Object{newSecret()}, - expectedConditions: []metav1.Condition{{ - Type: inventoryv1alpha1.ConditionCredentialsFound, - Status: metav1.ConditionTrue, - }}, - bma: newBMA(), - validateSecret: func(t *testing.T, c client.Client) { - secret := newSecret() - c.Get(context.TODO(), types.NamespacedName{Namespace: secret.Namespace, Name: secret.Name}, secret) - if len(secret.Labels) != 1 { - t.Errorf("expect two labels on secrets") - } - }, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - rbma := newTestReconciler(test.existingObjs) - err := rbma.checkAssetSecret(ctx, test.bma) - validateErrorAndStatusConditions(t, err, test.expectedErrorType, 
test.expectedConditions, test.bma) - test.validateSecret(t, rbma.client) - }) - } -} - -func TestEnsureLabels(t *testing.T) { - ctx := context.Background() - tests := []struct { - name string - existingObjs []client.Object - expectedErrorType error - bma *inventoryv1alpha1.BareMetalAsset - }{ - { - name: "EnsureLabelsSuccess", - existingObjs: []client.Object{newBMA()}, - bma: newBMAWithClusterDeployment(), - }, - { - name: "EnsureLabelsSuccessNoClusterDeployment", - existingObjs: []client.Object{newBMA()}, - bma: newBMA(), - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - rbma := newTestReconciler(test.existingObjs) - err := rbma.ensureLabels(ctx, test.bma) - validateErrorAndStatusConditions(t, err, test.expectedErrorType, nil, test.bma) - }) - } -} - -func TestCheckClusterDeployment(t *testing.T) { - ctx := context.Background() - tests := []struct { - name string - existingObjs []client.Object - expectedErrorType error - expectedConditions []metav1.Condition - bma *inventoryv1alpha1.BareMetalAsset - }{ - { - name: "No cluster specified", - existingObjs: []client.Object{}, - expectedErrorType: bmaerrors.NewNoClusterError(), - expectedConditions: []metav1.Condition{{ - Type: inventoryv1alpha1.ConditionClusterDeploymentFound, - Status: metav1.ConditionFalse, - }}, - bma: newBMA(), - }, - { - name: "ClusterDeploymentNotFound", - existingObjs: []client.Object{}, - expectedErrorType: fmt.Errorf("clusterdeployments.hive.openshift.io \"%s\" not found", testName), - expectedConditions: []metav1.Condition{{ - Type: inventoryv1alpha1.ConditionClusterDeploymentFound, - Status: metav1.ConditionFalse, - }}, - bma: newBMAWithClusterDeployment(), - }, - { - name: "ClusterDeploymentFound", - existingObjs: []client.Object{newClusterDeployment()}, - expectedConditions: []metav1.Condition{{ - Type: inventoryv1alpha1.ConditionClusterDeploymentFound, - Status: metav1.ConditionTrue, - }}, - bma: newBMAWithClusterDeployment(), - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - rbma := newTestReconciler(test.existingObjs) - err := rbma.checkClusterDeployment(ctx, test.bma) - validateErrorAndStatusConditions(t, err, test.expectedErrorType, test.expectedConditions, test.bma) - }) - } -} - -func TestEnsureHiveSyncSet(t *testing.T) { - ctx := context.Background() - tests := []struct { - name string - existingObjs []client.Object - expectedConditions []metav1.Condition - bma *inventoryv1alpha1.BareMetalAsset - }{ - { - name: "SyncSetCreate", - existingObjs: []client.Object{}, - expectedConditions: []metav1.Condition{ - { - Type: inventoryv1alpha1.ConditionAssetSyncStarted, - Status: metav1.ConditionTrue, - }, - { - Type: inventoryv1alpha1.ConditionAssetSyncCompleted, - Status: metav1.ConditionFalse, - }, - }, - bma: newBMAWithClusterDeployment(), - }, - { - name: "SyncSetUpdate", - existingObjs: []client.Object{func() *hivev1.SyncSet { - return &hivev1.SyncSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: testName, - Namespace: testNamespace, - OwnerReferences: []metav1.OwnerReference{ - { - Kind: testSSKind, - Name: testName, - }, - }, - }, - } - }()}, - expectedConditions: []metav1.Condition{ - { - Type: inventoryv1alpha1.ConditionAssetSyncStarted, - Status: metav1.ConditionTrue, - }, - { - Type: inventoryv1alpha1.ConditionAssetSyncCompleted, - Status: metav1.ConditionFalse, - }, - }, - bma: newBMAWithClusterDeployment(), - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - rbma := newTestReconciler(test.existingObjs) - err := 
rbma.ensureHiveSyncSet(ctx, test.bma) - validateErrorAndStatusConditions(t, err, nil, test.expectedConditions, test.bma) - - syncSet := &hivev1.SyncSet{} - syncSetError := rbma.client.Get(context.TODO(), types.NamespacedName{Name: testName, Namespace: testNamespace}, syncSet) - - assert.NoError(t, syncSetError) - - assert.Equal(t, syncSet.ObjectMeta.Labels[ClusterDeploymentNameLabel], test.bma.Spec.ClusterDeployment.Name) - assert.Equal(t, syncSet.ObjectMeta.Labels[ClusterDeploymentNamespaceLabel], test.bma.Spec.ClusterDeployment.Namespace) - assert.Equal(t, syncSet.ObjectMeta.Labels[testRoleLabel], string(test.bma.Spec.Role)) - - if test.name != "SyncSetCreate" { - assert.Equal(t, test.bma.Status.RelatedObjects[0].Kind, syncSet.TypeMeta.Kind) - assert.Equal(t, test.bma.Status.RelatedObjects[0].Name, syncSet.Name) - assert.Equal(t, test.bma.Status.RelatedObjects[0].APIVersion, syncSet.TypeMeta.APIVersion) - } - }) - } -} - -func TestCheckClusterSync(t *testing.T) { - ctx := context.Background() - tests := []struct { - name string - existingObjs []client.Object - returnValue bool - expectedConditions []metav1.Condition - bma *inventoryv1alpha1.BareMetalAsset - }{ - { - name: "ClusterSyncNotFound", - existingObjs: []client.Object{newBMA()}, - returnValue: false, - expectedConditions: []metav1.Condition{{ - Type: inventoryv1alpha1.ConditionAssetSyncCompleted, - Status: metav1.ConditionFalse, - }}, - bma: newBMA(), - }, - { - name: "UnexpectedResourceCount", - existingObjs: []client.Object{newBMA(), newClusterSync()}, - returnValue: false, - expectedConditions: []metav1.Condition{{ - Type: inventoryv1alpha1.ConditionAssetSyncCompleted, - Status: metav1.ConditionFalse, - }}, - bma: newBMA(), - }, - { - name: "SecretApplySuccessSyncCondition", - existingObjs: []client.Object{newBMAWithClusterDeployment(), newSyncSet(), newClusterSyncInstanceResources()}, - returnValue: true, - expectedConditions: []metav1.Condition{{ - Type: inventoryv1alpha1.ConditionAssetSyncCompleted, - Status: metav1.ConditionTrue, - }}, - bma: newBMAWithClusterDeployment(), - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - rbma := newTestReconciler(test.existingObjs) - assert.Equal(t, test.returnValue, rbma.checkHiveClusterSync(ctx, test.bma)) - validateErrorAndStatusConditions(t, nil, nil, test.expectedConditions, test.bma) - }) - } -} - -func TestDeleteSyncSet(t *testing.T) { - ctx := context.Background() - tests := []struct { - name string - existingObjs []client.Object - bma *inventoryv1alpha1.BareMetalAsset - }{ - { - name: "ClusterDeploymentWithEmptyNamespace", - existingObjs: []client.Object{newBMA()}, - bma: newBMA(), - }, - { - name: "ClusterDeploymentWithNamespace", - existingObjs: []client.Object{newBMA()}, - bma: newBMAWithClusterDeployment(), - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - rbma := newTestReconciler(test.existingObjs) - _, err := rbma.deleteSyncSet(ctx, test.bma) - validateErrorAndStatusConditions(t, err, nil, nil, test.bma) - }) - } -} - -func newBMA() *inventoryv1alpha1.BareMetalAsset { - return &inventoryv1alpha1.BareMetalAsset{ - TypeMeta: metav1.TypeMeta{ - Kind: testBMHKind, - }, - ObjectMeta: metav1.ObjectMeta{ - Name: testName, - Namespace: testNamespace, - ResourceVersion: "0", - }, - Spec: inventoryv1alpha1.BareMetalAssetSpec{ - BMC: inventoryv1alpha1.BMCDetails{ - CredentialsName: testName, - }, - Role: testRoleLabel, - }, - } -} - -func newBMAWithClusterDeployment() *inventoryv1alpha1.BareMetalAsset { - bma := 
newBMA() - bma.Spec.ClusterDeployment = metav1.ObjectMeta{ - Name: testName, - Namespace: testNamespace, - } - return bma -} - -func newSecret() *corev1.Secret { - return &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: testName, - Namespace: testNamespace, - }, - } -} - -func newClusterDeployment() *hivev1.ClusterDeployment { - cd := &hivev1.ClusterDeployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: testName, - Namespace: testNamespace, - }, - } - return cd -} - -func newClusterSync() *hiveinternalv1alpha1.ClusterSync { - return &hiveinternalv1alpha1.ClusterSync{ - ObjectMeta: metav1.ObjectMeta{ - Name: testName, - Namespace: testNamespace, - }, - Status: hiveinternalv1alpha1.ClusterSyncStatus{}, - } -} - -func newSyncSet() *hivev1.SyncSet { - return &hivev1.SyncSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: testName, - Namespace: testNamespace, - }, - Spec: hivev1.SyncSetSpec{ - SyncSetCommonSpec: hivev1.SyncSetCommonSpec{ - Resources: []runtime.RawExtension{ - { - Object: newSecret(), - }, - }, - }, - ClusterDeploymentRefs: []corev1.LocalObjectReference{ - { - Name: testName, - }, - }, - }, - } -} - -func newClusterSyncInstanceResources() *hiveinternalv1alpha1.ClusterSync { - ssi := newClusterSync() - ssi.Status.SyncSets = []hiveinternalv1alpha1.SyncStatus{ - { - Name: testName, - Result: hiveinternalv1alpha1.SuccessSyncSetResult, - }, - } - return ssi -} - -func newTestReconciler(existingObjs []client.Object) *ReconcileBareMetalAsset { - fakeClient := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(existingObjs...).Build() - rbma := &ReconcileBareMetalAsset{ - client: fakeClient, - scheme: scheme.Scheme, - } - - return rbma -} - -func validateErrorAndStatusConditions(t *testing.T, err error, expectedErrorType error, - expectedConditions []metav1.Condition, bma *inventoryv1alpha1.BareMetalAsset) { - if expectedErrorType != nil { - assert.EqualError(t, err, expectedErrorType.Error()) - } else { - assert.NoError(t, err) - } - for _, condition := range expectedConditions { - assert.True(t, meta.IsStatusConditionPresentAndEqual(bma.Status.Conditions, condition.Type, condition.Status)) - } - if bma != nil { - assert.Equal(t, len(expectedConditions), len(bma.Status.Conditions)) - } -} diff --git a/test/e2e/inventory_test.go b/test/e2e/inventory_test.go deleted file mode 100644 index dd0b0a8d3..000000000 --- a/test/e2e/inventory_test.go +++ /dev/null @@ -1,247 +0,0 @@ -package e2e - -import ( - "context" - "fmt" - "time" - - "github.com/openshift/hive/apis/hive/v1/aws" - - "github.com/onsi/ginkgo" - "github.com/onsi/gomega" - hivev1 "github.com/openshift/hive/apis/hive/v1" - hiveinternalv1alpha1 "github.com/openshift/hive/apis/hiveinternal/v1alpha1" - "github.com/stolostron/multicloud-operators-foundation/test/e2e/util" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/rand" -) - -var bmaGVR = schema.GroupVersionResource{ - Group: "inventory.open-cluster-management.io", - Version: "v1alpha1", - Resource: "baremetalassets", -} - -var _ = ginkgo.Describe("Testing BareMetalAsset", func() { - var testNamespace string - var testName string - ginkgo.BeforeEach(func() { - testName = "mycluster" - suffix := rand.String(6) - testNamespace = fmt.Sprintf("bma-ns-%v", suffix) - ns := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: testNamespace, - }, - } - // create ns - _, err 
:= kubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
-		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-		// create ClusterDeployment
-		clusterDeployment := &hivev1.ClusterDeployment{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:      testName,
-				Namespace: testNamespace,
-			},
-			Spec: hivev1.ClusterDeploymentSpec{
-				BaseDomain:  "hive.example.com",
-				ClusterName: testName,
-				Platform: hivev1.Platform{
-					AWS: &aws.Platform{
-						CredentialsSecretRef: corev1.LocalObjectReference{
-							Name: "aws-clusterpool-aws-creds",
-						},
-						Region: "us-east",
-					},
-				},
-				Provisioning: &hivev1.Provisioning{
-					InstallConfigSecretRef: &corev1.LocalObjectReference{
-						Name: "secret-ref",
-					},
-				},
-				PullSecretRef: &corev1.LocalObjectReference{
-					Name: "pull-ref",
-				},
-			},
-		}
-		_, err = hiveClient.HiveV1().ClusterDeployments(testNamespace).Create(context.Background(), clusterDeployment, metav1.CreateOptions{})
-		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-
-		clusterSync := &hiveinternalv1alpha1.ClusterSync{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:      testName,
-				Namespace: testNamespace,
-			},
-		}
-		_, err = hiveClient.HiveinternalV1alpha1().ClusterSyncs(testNamespace).Create(context.Background(), clusterSync, metav1.CreateOptions{})
-		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-	})
-	ginkgo.AfterEach(func() {
-		// clean up the test namespace
-		err := kubeClient.CoreV1().Namespaces().Delete(context.Background(), testNamespace, metav1.DeleteOptions{})
-		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-	})
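Note (annotation, not part of the deleted file): the specs below converge by polling with gomega.Eventually rather than sleeping. A compact sketch of the idiom with hypothetical names; kube stands in for a kubernetes.Interface like the suite's kubeClient:

package e2epoll

import (
	"context"
	"time"

	"github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// waitForNamespace polls until the namespace can be read, failing the
// assertion after one minute; a returned error keeps Eventually polling.
func waitForNamespace(g gomega.Gomega, kube kubernetes.Interface, name string) {
	g.Eventually(func() error {
		_, err := kube.CoreV1().Namespaces().Get(context.Background(), name, metav1.GetOptions{})
		return err
	}, 60*time.Second, time.Second).ShouldNot(gomega.HaveOccurred())
}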
-	ginkgo.Context("Create bma", func() {
-		ginkgo.It("BMA should be auto created successfully", func() {
-			// Create bma at first
-			bma, err := util.LoadResourceFromJSON(util.BMATemplate)
-			gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			err = unstructured.SetNestedField(bma.Object, testNamespace, "metadata", "namespace")
-			gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			err = unstructured.SetNestedField(bma.Object, testName, "metadata", "name")
-			gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			err = unstructured.SetNestedField(bma.Object, testNamespace, "spec", "clusterDeployment", "namespace")
-			gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			_, err = util.CreateResource(dynamicClient, bmaGVR, bma)
-			gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to create %s", bmaGVR.Resource)
-
-			// create secret
-			secret := &corev1.Secret{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "my-secret",
-					Namespace: testNamespace,
-				},
-			}
-			_, err = kubeClient.CoreV1().Secrets(testNamespace).Create(context.Background(), secret, metav1.CreateOptions{})
-			gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-
-			// ensure syncset is created
-			var syncSet *hivev1.SyncSet
-			gomega.Eventually(func() bool {
-				syncSet, err = hiveClient.HiveV1().SyncSets(testNamespace).Get(context.Background(), testName, metav1.GetOptions{})
-				if err != nil {
-					return false
-				}
-				return true
-			}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
-
-			// manually update clustersync status
-			clusterSync, err := hiveClient.HiveinternalV1alpha1().ClusterSyncs(testNamespace).Get(context.Background(), testName, metav1.GetOptions{})
-			gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			clusterSync.Status.SyncSets = []hiveinternalv1alpha1.SyncStatus{
-				{
-					Name:               testName,
-					ObservedGeneration: syncSet.Generation,
-					LastTransitionTime: metav1.Now(),
-					Result:             hiveinternalv1alpha1.SuccessSyncSetResult,
-				},
-			}
-			_, err = hiveClient.HiveinternalV1alpha1().ClusterSyncs(testNamespace).UpdateStatus(context.Background(), clusterSync, metav1.UpdateOptions{})
-			gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			// ensure conditions of bma are correct
-			gomega.Eventually(func() error {
-				// sync status of syncSet in clusterSync
-				syncSet, err := hiveClient.HiveV1().SyncSets(testNamespace).Get(context.Background(), testName, metav1.GetOptions{})
-				if err != nil {
-					// keep polling until the SyncSet can be read
-					return err
-				}
-
-				clusterSync, err := hiveClient.HiveinternalV1alpha1().ClusterSyncs(testNamespace).Get(context.Background(), testName, metav1.GetOptions{})
-				if err != nil {
-					return err
-				}
-				if len(clusterSync.Status.SyncSets) == 1 {
-					if clusterSync.Status.SyncSets[0].ObservedGeneration != syncSet.Generation {
-						clusterSync.Status.SyncSets[0].ObservedGeneration = syncSet.Generation
-						_, err := hiveClient.HiveinternalV1alpha1().ClusterSyncs(testNamespace).UpdateStatus(context.Background(), clusterSync, metav1.UpdateOptions{})
-						if err != nil {
-							return err
-						}
-					}
-				}
-
-				bma, err := dynamicClient.Resource(bmaGVR).Namespace(testNamespace).Get(context.Background(), testName, metav1.GetOptions{})
-				if err != nil {
-					return err
-				}
-
-				status := bma.Object["status"]
-				conditions := status.(map[string]interface{})["conditions"]
-				for _, condition := range conditions.([]interface{}) {
-					conditionStatus := condition.(map[string]interface{})["status"]
-					if conditionStatus.(string) != "True" {
-						return fmt.Errorf("condition %v is not correct, reason %v, message %v",
-							condition.(map[string]interface{})["type"],
-							condition.(map[string]interface{})["reason"],
-							condition.(map[string]interface{})["message"])
-					}
-				}
-				return nil
-
-			}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
-		})
-	})
-
-	ginkgo.Context("Deleting a ClusterDeployment", func() {
-		ginkgo.BeforeEach(func() {
-			bma, err := util.LoadResourceFromJSON(util.BMATemplate)
-			gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			err = unstructured.SetNestedField(bma.Object, testNamespace, "metadata", "namespace")
-			gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			err = unstructured.SetNestedField(bma.Object, testName, "metadata", "name")
-			gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			err = unstructured.SetNestedField(bma.Object, testNamespace, "spec", "clusterDeployment", "namespace")
-			gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			// create the BareMetalAsset
-			_, err = util.CreateResource(dynamicClient, bmaGVR, bma)
-			gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to create %s", bmaGVR.Resource)
-		})
-
-		ginkgo.It("delete clusterdeployment should clean ref in bma", func() {
-			gomega.Eventually(func() error {
-				cd, err := hiveClient.HiveV1().ClusterDeployments(testNamespace).Get(context.Background(), testName, metav1.GetOptions{})
-				if err != nil {
-					return err
-				}
-				for _, finalizer := range cd.Finalizers {
-					if finalizer == "baremetalasset.inventory.open-cluster-management.io" {
-						return nil
-					}
-				}
-				return fmt.Errorf("there is no finalizer baremetalasset.inventory.open-cluster-management.io")
-			}, 60*time.Second, 1*time.Second).ShouldNot(gomega.HaveOccurred())
-
-			gomega.Eventually(func() bool {
-				bma, err := util.GetResource(dynamicClient, bmaGVR, testNamespace, testName)
-				if err != nil {
-					return false
-				}
-				labels := bma.GetLabels()
-				if labels["metal3.io/cluster-deployment-name"] != testName {
-					return false
-				}
-				if labels["metal3.io/cluster-deployment-namespace"] != testNamespace {
-					return false
-				}
-				return true
-			}, 60*time.Second, 1*time.Second).Should(gomega.BeTrue())
-
-			err := hiveClient.HiveV1().ClusterDeployments(testNamespace).Delete(context.Background(), 
testName, metav1.DeleteOptions{}) - gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) - - gomega.Eventually(func() bool { - bma, err := util.GetResource(dynamicClient, bmaGVR, testNamespace, testName) - if err != nil { - return false - } - name, _, _ := unstructured.NestedString(bma.Object, "spec", "clusterDeployment", "name") - namespace, _, _ := unstructured.NestedString(bma.Object, "spec", "clusterDeployment", "namespace") - if name != "" || namespace != "" { - return false - } - return true - }, 60*time.Second, 1*time.Second).Should(gomega.BeTrue()) - - gomega.Eventually(func() bool { - _, err := hiveClient.HiveV1().ClusterDeployments(testNamespace).Get(context.Background(), testName, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return true - } - return false - }, 60*time.Second, 1*time.Second).Should(gomega.BeTrue()) - }) - }) -}) diff --git a/test/e2e/util/template.go b/test/e2e/util/template.go index 3825e53b1..8a8f0e4e0 100644 --- a/test/e2e/util/template.go +++ b/test/e2e/util/template.go @@ -150,24 +150,3 @@ const ManagedClusterViewTemplate = `{ } } }` - -const BMATemplate = ` -{ - "apiVersion": "inventory.open-cluster-management.io/v1alpha1", - "kind": "BareMetalAsset", - "metadata": { - "name": "mycluster" - }, - "spec": { - "bmc": { - "address": "localhost", - "credentialsName": "my-secret" - }, - "hardwareProfile": "test", - "Role": "worker", - "clusterDeployment": { - "name": "mycluster", - "namespace": "default" - } - } -}` diff --git a/vendor/github.com/metal3-io/baremetal-operator/apis/LICENSE b/vendor/github.com/metal3-io/baremetal-operator/apis/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/vendor/github.com/metal3-io/baremetal-operator/apis/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/baremetalhost_types.go b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/baremetalhost_types.go deleted file mode 100644 index 433a26f88..000000000 --- a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/baremetalhost_types.go +++ /dev/null @@ -1,1063 +0,0 @@ -/* - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "time" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" -) - -// NOTE: json tags are required. Any new fields you add must have -// json tags for the fields to be serialized. - -// NOTE(dhellmann): Update docs/api.md when changing these data structure. - -const ( - // BareMetalHostFinalizer is the name of the finalizer added to - // hosts to block delete operations until the physical host can be - // deprovisioned. - BareMetalHostFinalizer string = "baremetalhost.metal3.io" - - // PausedAnnotation is the annotation that pauses the reconciliation (triggers - // an immediate requeue) - PausedAnnotation = "baremetalhost.metal3.io/paused" - - // DetachedAnnotation is the annotation which stops provisioner management of the host - // unlike in the paused case, the host status may be updated - DetachedAnnotation = "baremetalhost.metal3.io/detached" - - // StatusAnnotation is the annotation that keeps a copy of the Status of BMH - // This is particularly useful when we pivot BMH. If the status - // annotation is present and status is empty, BMO will reconstruct BMH Status - // from the status annotation. - StatusAnnotation = "baremetalhost.metal3.io/status" -) - -// RootDeviceHints holds the hints for specifying the storage location -// for the root filesystem for the image. -type RootDeviceHints struct { - // A Linux device name like "/dev/vda". The hint must match the - // actual value exactly. - DeviceName string `json:"deviceName,omitempty"` - - // A SCSI bus address like 0:0:0:0. The hint must match the actual - // value exactly. - HCTL string `json:"hctl,omitempty"` - - // A vendor-specific device identifier. The hint can be a - // substring of the actual value. - Model string `json:"model,omitempty"` - - // The name of the vendor or manufacturer of the device. The hint - // can be a substring of the actual value. - Vendor string `json:"vendor,omitempty"` - - // Device serial number. The hint must match the actual value - // exactly. - SerialNumber string `json:"serialNumber,omitempty"` - - // The minimum size of the device in Gigabytes. - // +kubebuilder:validation:Minimum=0 - MinSizeGigabytes int `json:"minSizeGigabytes,omitempty"` - - // Unique storage identifier. The hint must match the actual value - // exactly. - WWN string `json:"wwn,omitempty"` - - // Unique storage identifier with the vendor extension - // appended. The hint must match the actual value exactly. - WWNWithExtension string `json:"wwnWithExtension,omitempty"` - - // Unique vendor storage identifier. The hint must match the - // actual value exactly. - WWNVendorExtension string `json:"wwnVendorExtension,omitempty"` - - // True if the device should use spinning media, false otherwise. - Rotational *bool `json:"rotational,omitempty"` -} - -// BootMode is the boot mode of the system -// +kubebuilder:validation:Enum=UEFI;UEFISecureBoot;legacy -type BootMode string - -// Allowed boot mode from metal3 -const ( - UEFI BootMode = "UEFI" - UEFISecureBoot BootMode = "UEFISecureBoot" - Legacy BootMode = "legacy" - DefaultBootMode BootMode = UEFI -) - -// OperationalStatus represents the state of the host -type OperationalStatus string - -const ( - // OperationalStatusOK is the status value for when the host is - // configured correctly and is manageable. 
- OperationalStatusOK OperationalStatus = "OK" - - // OperationalStatusDiscovered is the status value for when the - // host is only partially configured, such as when when the BMC - // address is known but the login credentials are not. - OperationalStatusDiscovered OperationalStatus = "discovered" - - // OperationalStatusError is the status value for when the host - // has any sort of error. - OperationalStatusError OperationalStatus = "error" - - // OperationalStatusDelayed is the status value for when the host - // deployment needs to be delayed to limit simultaneous hosts provisioning - OperationalStatusDelayed = "delayed" - - // OperationalStatusDetached is the status value when the host is - // marked unmanaged via the detached annotation - OperationalStatusDetached OperationalStatus = "detached" -) - -// ErrorType indicates the class of problem that has caused the Host resource -// to enter an error state. -type ErrorType string - -const ( - // ProvisionedRegistrationError is an error condition occurring when the controller - // is unable to re-register an already provisioned host. - ProvisionedRegistrationError ErrorType = "provisioned registration error" - // RegistrationError is an error condition occurring when the - // controller is unable to connect to the Host's baseboard management - // controller. - RegistrationError ErrorType = "registration error" - // InspectionError is an error condition occurring when an attempt to - // obtain hardware details from the Host fails. - InspectionError ErrorType = "inspection error" - // PreparationError is an error condition occurring when do - // cleaning steps failed. - PreparationError ErrorType = "preparation error" - // ProvisioningError is an error condition occurring when the controller - // fails to provision or deprovision the Host. - ProvisioningError ErrorType = "provisioning error" - // PowerManagementError is an error condition occurring when the - // controller is unable to modify the power state of the Host. - PowerManagementError ErrorType = "power management error" - // DetachError is an error condition occurring when the - // controller is unable to detatch the host from the provisioner - DetachError ErrorType = "detach error" -) - -// ProvisioningState defines the states the provisioner will report -// the host has having. 
-// ProvisioningState defines the states the provisioner will report
-// the host as having.
-type ProvisioningState string
-
-const (
-	// StateNone means the state is unknown
-	StateNone ProvisioningState = ""
-
-	// StateUnmanaged means there is insufficient information available to
-	// register the host
-	StateUnmanaged ProvisioningState = "unmanaged"
-
-	// StateRegistering means we are telling the backend about the host
-	StateRegistering ProvisioningState = "registering"
-
-	// StateMatchProfile means we are comparing the discovered details
-	// against known hardware profiles
-	StateMatchProfile ProvisioningState = "match profile"
-
-	// StatePreparing means we are removing existing configuration from,
-	// and applying new configuration to, the host
-	StatePreparing ProvisioningState = "preparing"
-
-	// StateReady is a deprecated name for StateAvailable
-	StateReady ProvisioningState = "ready"
-
-	// StateAvailable means the host can be consumed
-	StateAvailable ProvisioningState = "available"
-
-	// StateProvisioning means we are writing an image to the host's
-	// disk(s)
-	StateProvisioning ProvisioningState = "provisioning"
-
-	// StateProvisioned means we have written an image to the host's
-	// disk(s)
-	StateProvisioned ProvisioningState = "provisioned"
-
-	// StateExternallyProvisioned means something else is managing the
-	// image on the host
-	StateExternallyProvisioned ProvisioningState = "externally provisioned"
-
-	// StateDeprovisioning means we are removing an image from the
-	// host's disk(s)
-	StateDeprovisioning ProvisioningState = "deprovisioning"
-
-	// StateInspecting means we are running the agent on the host to
-	// learn about the hardware components available there
-	StateInspecting ProvisioningState = "inspecting"
-
-	// StateDeleting means we are in the process of cleaning up the host
-	// ready for deletion
-	StateDeleting ProvisioningState = "deleting"
-)
-
-// BMCDetails contains the information necessary to communicate with
-// the bare metal controller module on the host.
-type BMCDetails struct {
-
-	// Address holds the URL for accessing the controller on the
-	// network.
-	Address string `json:"address"`
-
-	// The name of the secret containing the BMC credentials (requires
-	// keys "username" and "password").
-	CredentialsName string `json:"credentialsName"`
-
-	// DisableCertificateVerification disables verification of server
-	// certificates when using HTTPS to connect to the BMC. This is
-	// required when the server certificate is self-signed, but is
-	// insecure because it allows a man-in-the-middle to intercept the
-	// connection.
-	DisableCertificateVerification bool `json:"disableCertificateVerification,omitempty"`
-}
-
-// HardwareRAIDVolume defines the desired configuration of a volume in hardware RAID
-type HardwareRAIDVolume struct {
-	// Size (Integer) of the logical disk to be created in GiB.
-	// If unspecified or set to 0, the maximum capacity of the disk will be used for the logical disk.
-	// +kubebuilder:validation:Minimum=0
-	SizeGibibytes *int `json:"sizeGibibytes,omitempty"`
-
-	// RAID level for the logical disk. The following levels are supported: 0;1;2;5;6;1+0;5+0;6+0.
-	// +kubebuilder:validation:Enum="0";"1";"2";"5";"6";"1+0";"5+0";"6+0"
-	Level string `json:"level" required:"true"`
-
-	// Name of the volume. Should be unique within the Node. If not specified, the volume name will be auto-generated.
-	// +kubebuilder:validation:MaxLength=64
-	Name string `json:"name,omitempty"`
-
-	// Select disks with only rotational or solid-state storage
-	Rotational *bool `json:"rotational,omitempty"`
-
-	// Integer, number of physical disks to use for the logical disk. Defaults to the minimum number of disks required
-	// for the particular RAID level.
-	// +kubebuilder:validation:Minimum=1
-	NumberOfPhysicalDisks *int `json:"numberOfPhysicalDisks,omitempty"`
-}
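To make BMCDetails concrete, here is an illustrative value. In metal3 the URL scheme of Address selects the BMC driver (for example ipmi:// or redfish://); the exact scheme set is a property of the operator, not of this type, and all names below are made up:

```go
package v1alpha1

// exampleBMC shows a filled-in BMCDetails. The referenced secret must
// carry "username" and "password" keys, per the field comment above.
var exampleBMC = BMCDetails{
	Address:         "ipmi://192.168.122.1:6233", // illustrative BMC endpoint
	CredentialsName: "worker-0-bmc-secret",       // hypothetical secret name
	// Leave this false unless the BMC serves a self-signed certificate;
	// disabling verification permits man-in-the-middle interception.
	DisableCertificateVerification: false,
}
```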
-// SoftwareRAIDVolume defines the desired configuration of a volume in software RAID
-type SoftwareRAIDVolume struct {
-	// Size (Integer) of the logical disk to be created in GiB.
-	// If unspecified or set to 0, the maximum capacity of the disk will be used for the logical disk.
-	// +kubebuilder:validation:Minimum=0
-	SizeGibibytes *int `json:"sizeGibibytes,omitempty"`
-
-	// RAID level for the logical disk. The following levels are supported: 0;1;1+0.
-	// +kubebuilder:validation:Enum="0";"1";"1+0"
-	Level string `json:"level" required:"true"`
-
-	// A list of device hints; the number of items should be greater than or equal to 2.
-	// +kubebuilder:validation:MinItems=2
-	PhysicalDisks []RootDeviceHints `json:"physicalDisks,omitempty"`
-}
-
-// RAIDConfig contains the configuration required to configure RAID on a bare metal server
-type RAIDConfig struct {
-	// The list of logical disks for hardware RAID. If rootDeviceHints isn't used, the first volume is the root volume.
-	// You can set the value of this field to `[]` to clear all the hardware RAID configurations.
-	// +optional
-	// +nullable
-	HardwareRAIDVolumes []HardwareRAIDVolume `json:"hardwareRAIDVolumes"`
-
-	// The list of logical disks for software RAID. If rootDeviceHints isn't used, the first volume is the root volume.
-	// If HardwareRAIDVolumes is set, this field is invalid.
-	// The number of created Software RAID devices must be 1 or 2.
-	// If there is only one Software RAID device, it has to be a RAID-1.
-	// If there are two, the first one has to be a RAID-1, while the RAID level for the second one can be 0, 1, or 1+0.
-	// As the first RAID device will be the deployment device,
-	// enforcing a RAID-1 reduces the risk of ending up with a non-booting node in case of a disk failure.
-	// Software RAID will always be deleted.
-	// +kubebuilder:validation:MaxItems=2
-	// +optional
-	// +nullable
-	SoftwareRAIDVolumes []SoftwareRAIDVolume `json:"softwareRAIDVolumes"`
-}
-
-// FirmwareConfig contains the BIOS configuration to apply to a bare metal server
-type FirmwareConfig struct {
-	// Supports the virtualization of platform hardware.
-	// This supports the following options: true, false.
-	// +kubebuilder:validation:Enum=true;false
-	VirtualizationEnabled *bool `json:"virtualizationEnabled,omitempty"`
-
-	// Allows a single physical processor core to appear as several logical processors.
-	// This supports the following options: true, false.
-	// +kubebuilder:validation:Enum=true;false
-	SimultaneousMultithreadingEnabled *bool `json:"simultaneousMultithreadingEnabled,omitempty"`
-
-	// SR-IOV support enables a hypervisor to create virtual instances of a PCI-express device, potentially increasing performance.
-	// This supports the following options: true, false.
-	// +kubebuilder:validation:Enum=true;false
-	SriovEnabled *bool `json:"sriovEnabled,omitempty"`
-}
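A worked RAIDConfig sketch: a single hardware RAID-1 volume used as the deployment device. Hardware and software volumes are mutually exclusive (the vendored validation code later in this diff rejects setting both); the values here are illustrative:

```go
package v1alpha1

// exampleRAID builds a RAIDConfig with one hardware RAID-1 volume.
// Setting HardwareRAIDVolumes to an empty slice instead would clear
// all existing hardware RAID configuration, per the field comment.
func exampleRAID() *RAIDConfig {
	size := 512 // GiB; illustrative value
	disks := 2  // the minimum for RAID-1
	return &RAIDConfig{
		HardwareRAIDVolumes: []HardwareRAIDVolume{{
			Name:                  "root",
			Level:                 "1",
			SizeGibibytes:         &size,
			NumberOfPhysicalDisks: &disks,
		}},
	}
}
```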
-// BareMetalHostSpec defines the desired state of BareMetalHost
-type BareMetalHostSpec struct {
-	// Important: Run "make generate manifests" to regenerate code
-	// after modifying this file
-
-	// Taints is the full, authoritative list of taints to apply to
-	// the corresponding Machine. This list will overwrite any
-	// modifications made to the Machine on an ongoing basis.
-	// +optional
-	Taints []corev1.Taint `json:"taints,omitempty"`
-
-	// How do we connect to the BMC?
-	BMC BMCDetails `json:"bmc,omitempty"`
-
-	// RAID configuration for the bare metal server
-	RAID *RAIDConfig `json:"raid,omitempty"`
-
-	// BIOS configuration for the bare metal server
-	Firmware *FirmwareConfig `json:"firmware,omitempty"`
-
-	// What is the name of the hardware profile for this host? It
-	// should only be necessary to set this when inspection cannot
-	// automatically determine the profile.
-	HardwareProfile string `json:"hardwareProfile,omitempty"`
-
-	// Provide guidance about how to choose the device for the image
-	// being provisioned.
-	RootDeviceHints *RootDeviceHints `json:"rootDeviceHints,omitempty"`
-
-	// Select the method of initializing the hardware during
-	// boot. Defaults to UEFI.
-	// +optional
-	BootMode BootMode `json:"bootMode,omitempty"`
-
-	// Which MAC address will PXE boot? This is optional for some
-	// types, but required for libvirt VMs driven by vbmc.
-	// +kubebuilder:validation:Pattern=`[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}`
-	BootMACAddress string `json:"bootMACAddress,omitempty"`
-
-	// Should the server be online?
-	Online bool `json:"online"`
-
-	// ConsumerRef can be used to store information about something
-	// that is using a host. When it is not empty, the host is
-	// considered "in use".
-	ConsumerRef *corev1.ObjectReference `json:"consumerRef,omitempty"`
-
-	// Image holds the details of the image to be provisioned.
-	Image *Image `json:"image,omitempty"`
-
-	// UserData holds the reference to the Secret containing the user
-	// data to be passed to the host before it boots.
-	UserData *corev1.SecretReference `json:"userData,omitempty"`
-
-	// PreprovisioningNetworkDataName is the name of the Secret in the
-	// local namespace containing network configuration (e.g. content of
-	// network_data.json) which is passed to the preprovisioning image, and to
-	// the Config Drive if not overridden by specifying NetworkData.
-	PreprovisioningNetworkDataName string `json:"preprovisioningNetworkDataName,omitempty"`
-
-	// NetworkData holds the reference to the Secret containing network
-	// configuration (e.g. content of network_data.json) which is passed
-	// to the Config Drive.
-	NetworkData *corev1.SecretReference `json:"networkData,omitempty"`
-
-	// MetaData holds the reference to the Secret containing host metadata
-	// (e.g. meta_data.json) which is passed to the Config Drive.
-	MetaData *corev1.SecretReference `json:"metaData,omitempty"`
-
-	// Description is human-entered text used to help identify the host
-	Description string `json:"description,omitempty"`
-
-	// ExternallyProvisioned means something else is managing the
-	// image running on the host and the operator should only manage
-	// the power status and hardware inventory inspection. If the
-	// Image field is filled in, this field is ignored.
-	ExternallyProvisioned bool `json:"externallyProvisioned,omitempty"`
-
-	// When set to disabled, automated cleaning will be avoided
-	// during provisioning and deprovisioning.
-	// +optional
-	// +kubebuilder:default:=metadata
-	// +kubebuilder:validation:Optional
-	AutomatedCleaningMode AutomatedCleaningMode `json:"automatedCleaningMode,omitempty"`
-
-	// A custom deploy procedure.
-	// +optional
-	CustomDeploy *CustomDeploy `json:"customDeploy,omitempty"`
-}
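Putting the spec together: a minimal host that can be registered and provisioned needs little more than BMC access, a boot MAC, and an image (the Image type is defined just below). Every name and URL here is an illustrative placeholder:

```go
package v1alpha1

// exampleSpec is a minimal BareMetalHostSpec sketch.
var exampleSpec = BareMetalHostSpec{
	Online: true, // power the server on
	BMC: BMCDetails{
		Address:         "redfish://10.0.0.10/redfish/v1/Systems/1",
		CredentialsName: "host-0-bmc-secret",
	},
	BootMACAddress: "52:54:00:aa:bb:cc",
	BootMode:       UEFI, // also the default when left empty
	Image: &Image{
		URL:          "http://images.example.com/host.qcow2",
		Checksum:     "d41d8cd98f00b204e9800998ecf8427e", // illustrative
		ChecksumType: MD5,
	},
}
```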
-// AutomatedCleaningMode is the interface to enable/disable automated cleaning
-// +kubebuilder:validation:Enum:=metadata;disabled
-type AutomatedCleaningMode string
-
-// Allowed automated cleaning modes
-const (
-	CleaningModeDisabled AutomatedCleaningMode = "disabled"
-	CleaningModeMetadata AutomatedCleaningMode = "metadata"
-)
-
-// ChecksumType holds the algorithm name for the checksum
-// +kubebuilder:validation:Enum=md5;sha256;sha512
-type ChecksumType string
-
-const (
-	// MD5 checksum type
-	MD5 ChecksumType = "md5"
-
-	// SHA256 checksum type
-	SHA256 ChecksumType = "sha256"
-
-	// SHA512 checksum type
-	SHA512 ChecksumType = "sha512"
-)
-
-// Image holds the details of an image either to be provisioned or that
-// has been provisioned.
-type Image struct {
-	// URL is a location of an image to deploy.
-	URL string `json:"url"`
-
-	// Checksum is the checksum for the image.
-	Checksum string `json:"checksum,omitempty"`
-
-	// ChecksumType is the checksum algorithm for the image,
-	// e.g. md5, sha256, sha512
-	ChecksumType ChecksumType `json:"checksumType,omitempty"`
-
-	// DiskFormat contains the format of the image (raw, qcow2, ...).
-	// Needs to be set to raw for raw image streaming.
-	// Note that live-iso means an ISO referenced by the url will be live-booted
-	// and not deployed to disk; in this case the checksum options
-	// are not required and will be ignored if specified.
-	// +kubebuilder:validation:Enum=raw;qcow2;vdi;vmdk;live-iso
-	DiskFormat *string `json:"format,omitempty"`
-}
-
-func (image *Image) IsLiveISO() bool {
-	return image != nil && image.DiskFormat != nil && *image.DiskFormat == "live-iso"
-}
-
-// CustomDeploy is a description of a customized deploy process.
-type CustomDeploy struct {
-	// Custom deploy method name.
-	// This name is specific to the deploy ramdisk used. If you don't have
-	// a custom deploy ramdisk, you shouldn't use CustomDeploy.
-	Method string `json:"method"`
-}
-
-// FIXME(dhellmann): We probably want some other module to own these
-// data structures.
-
-// ClockSpeed is a clock speed in MHz
-// +kubebuilder:validation:Format=double
-type ClockSpeed float64
-
-// ClockSpeed multipliers
-const (
-	MegaHertz ClockSpeed = 1.0
-	GigaHertz           = 1000 * MegaHertz
-)
-
-// Capacity is a disk size in Bytes
-type Capacity int64
-
-// Capacity multipliers
-const (
-	Byte     Capacity = 1
-	KibiByte          = Byte * 1024
-	KiloByte          = Byte * 1000
-	MebiByte          = KibiByte * 1024
-	MegaByte          = KiloByte * 1000
-	GibiByte          = MebiByte * 1024
-	GigaByte          = MegaByte * 1000
-	TebiByte          = GibiByte * 1024
-	TeraByte          = GigaByte * 1000
-)
-
-// DiskType is a disk type, i.e. HDD, SSD, NVME.
-type DiskType string
-
-// DiskType constants.
-const (
-	HDD  DiskType = "HDD"
-	SSD  DiskType = "SSD"
-	NVME DiskType = "NVME"
-)
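Since the ClockSpeed and Capacity multipliers above are ordinary numeric constants, unit conversion is plain arithmetic. A quick sketch:

```go
package v1alpha1

import "fmt"

// exampleUnits demonstrates arithmetic with the multiplier constants.
func exampleUnits() {
	disk := 480 * GigaByte       // a vendor-advertised 480 GB disk, in bytes
	fmt.Println(disk / GibiByte) // prints 447: the same disk in GiB
	cpu := 2.4 * GigaHertz       // 2.4 GHz expressed in MHz
	fmt.Println(cpu)             // prints 2400
}
```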
-// CPU describes one processor on the host.
-type CPU struct {
-	Arch           string     `json:"arch,omitempty"`
-	Model          string     `json:"model,omitempty"`
-	ClockMegahertz ClockSpeed `json:"clockMegahertz,omitempty"`
-	Flags          []string   `json:"flags,omitempty"`
-	Count          int        `json:"count,omitempty"`
-}
-
-// Storage describes one storage device (disk, SSD, etc.) on the host.
-type Storage struct {
-	// The Linux device name of the disk, e.g. "/dev/sda". Note that this
-	// may not be stable across reboots.
-	Name string `json:"name,omitempty"`
-
-	// Whether this disk represents rotational storage.
-	// Using this field is not recommended; prefer the 'Type' field
-	// instead. This field will eventually be deprecated.
-	Rotational bool `json:"rotational,omitempty"`
-
-	// Device type, one of: HDD, SSD, NVME.
-	// +kubebuilder:validation:Optional
-	// +kubebuilder:validation:Enum=HDD;SSD;NVME;
-	Type DiskType `json:"type,omitempty"`
-
-	// The size of the disk in Bytes
-	SizeBytes Capacity `json:"sizeBytes,omitempty"`
-
-	// The name of the vendor of the device
-	Vendor string `json:"vendor,omitempty"`
-
-	// Hardware model
-	Model string `json:"model,omitempty"`
-
-	// The serial number of the device
-	SerialNumber string `json:"serialNumber,omitempty"`
-
-	// The WWN of the device
-	WWN string `json:"wwn,omitempty"`
-
-	// The WWN Vendor extension of the device
-	WWNVendorExtension string `json:"wwnVendorExtension,omitempty"`
-
-	// The WWN with the extension
-	WWNWithExtension string `json:"wwnWithExtension,omitempty"`
-
-	// The SCSI location of the device
-	HCTL string `json:"hctl,omitempty"`
-}
-
-// VLANID is a 12-bit 802.1Q VLAN identifier
-// +kubebuilder:validation:Type=integer
-// +kubebuilder:validation:Minimum=0
-// +kubebuilder:validation:Maximum=4094
-type VLANID int32
-
-// VLAN represents the name and ID of a VLAN
-type VLAN struct {
-	ID VLANID `json:"id,omitempty"`
-
-	Name string `json:"name,omitempty"`
-}
-
-// NIC describes one network interface on the host.
-type NIC struct {
-	// The name of the network interface, e.g. "en0"
-	Name string `json:"name,omitempty"`
-
-	// The vendor and product IDs of the NIC, e.g. "0x8086 0x1572"
-	Model string `json:"model,omitempty"`
-
-	// The device MAC address
-	// +kubebuilder:validation:Pattern=`[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}`
-	MAC string `json:"mac,omitempty"`
-
-	// The IP address of the interface. This will be an IPv4 or IPv6 address
-	// if one is present. If both IPv4 and IPv6 addresses are present in a
-	// dual-stack environment, two NICs will be reported, one for each IP.
-	IP string `json:"ip,omitempty"`
-
-	// The speed of the device in Gigabits per second
-	SpeedGbps int `json:"speedGbps,omitempty"`
-
-	// The VLANs available
-	VLANs []VLAN `json:"vlans,omitempty"`
-
-	// The untagged VLAN ID
-	VLANID VLANID `json:"vlanId,omitempty"`
-
-	// Whether the NIC is PXE Bootable
-	PXE bool `json:"pxe,omitempty"`
-}
-
-// Firmware describes the firmware on the host.
-type Firmware struct {
-	// The BIOS for this firmware
-	BIOS BIOS `json:"bios,omitempty"`
-}
-
-// BIOS describes the BIOS version on the host.
-type BIOS struct {
-	// The release/build date for this BIOS
-	Date string `json:"date,omitempty"`
-
-	// The vendor name for this BIOS
-	Vendor string `json:"vendor,omitempty"`
-
-	// The version of the BIOS
-	Version string `json:"version,omitempty"`
-}
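A small usage sketch over the inventory types above: finding the first PXE-bootable interface in discovered hardware. This is a hypothetical helper (HardwareDetails is defined just below), not code from the vendored package:

```go
package v1alpha1

// pxeNIC returns the first PXE-bootable NIC from discovered hardware,
// or nil when none exists. Illustrative helper only.
func pxeNIC(hw *HardwareDetails) *NIC {
	if hw == nil {
		return nil
	}
	for i := range hw.NIC {
		if hw.NIC[i].PXE {
			return &hw.NIC[i]
		}
	}
	return nil
}
```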
-// HardwareDetails collects all of the information about hardware
-// discovered on the host.
-type HardwareDetails struct {
-	SystemVendor HardwareSystemVendor `json:"systemVendor,omitempty"`
-	Firmware     Firmware             `json:"firmware,omitempty"`
-	RAMMebibytes int                  `json:"ramMebibytes,omitempty"`
-	NIC          []NIC                `json:"nics,omitempty"`
-	Storage      []Storage            `json:"storage,omitempty"`
-	CPU          CPU                  `json:"cpu,omitempty"`
-	Hostname     string               `json:"hostname,omitempty"`
-}
-
-// HardwareSystemVendor stores details about the whole hardware system.
-type HardwareSystemVendor struct {
-	Manufacturer string `json:"manufacturer,omitempty"`
-	ProductName  string `json:"productName,omitempty"`
-	SerialNumber string `json:"serialNumber,omitempty"`
-}
-
-// CredentialsStatus contains the reference and version of the last
-// set of BMC credentials the controller was able to validate.
-type CredentialsStatus struct {
-	Reference *corev1.SecretReference `json:"credentials,omitempty"`
-	Version   string                  `json:"credentialsVersion,omitempty"`
-}
-
-// RebootMode defines known variations of reboot modes
-type RebootMode string
-
-const (
-	// RebootModeHard is defined for a hard reset of a node
-	RebootModeHard RebootMode = "hard"
-	// RebootModeSoft is defined for a soft reset of a node
-	RebootModeSoft RebootMode = "soft"
-)
-
-// RebootAnnotationArguments defines the arguments of the RebootAnnotation type
-type RebootAnnotationArguments struct {
-	Mode RebootMode `json:"mode"`
-}
-
-// Match compares the saved status information with the name and
-// content of a secret object.
-func (cs CredentialsStatus) Match(secret corev1.Secret) bool {
-	switch {
-	case cs.Reference == nil:
-		return false
-	case cs.Reference.Name != secret.ObjectMeta.Name:
-		return false
-	case cs.Reference.Namespace != secret.ObjectMeta.Namespace:
-		return false
-	case cs.Version != secret.ObjectMeta.ResourceVersion:
-		return false
-	}
-	return true
-}
-
-// OperationMetric contains metadata about an operation (inspection,
-// provisioning, etc.) used for tracking metrics.
-type OperationMetric struct {
-	// +nullable
-	Start metav1.Time `json:"start,omitempty"`
-	// +nullable
-	End metav1.Time `json:"end,omitempty"`
-}
-
-// Duration returns the length of time that was spent on the
-// operation. If the operation has not started, it returns 0.
-func (om OperationMetric) Duration() time.Duration {
-	if om.Start.IsZero() {
-		return 0
-	}
-	return om.End.Time.Sub(om.Start.Time)
-}
-
-// OperationHistory holds information about operations performed on a
-// host.
-type OperationHistory struct {
-	Register    OperationMetric `json:"register,omitempty"`
-	Inspect     OperationMetric `json:"inspect,omitempty"`
-	Provision   OperationMetric `json:"provision,omitempty"`
-	Deprovision OperationMetric `json:"deprovision,omitempty"`
-}
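Match is what lets a controller notice credential rotation: if the fetched secret's name, namespace, or resourceVersion differs from the recorded reference, the credentials need re-validation. A usage sketch (BareMetalHost and its GoodCredentials field are defined just below; the helper itself is hypothetical):

```go
package v1alpha1

import corev1 "k8s.io/api/core/v1"

// credentialsChanged reports whether the BMC secret differs from the
// last set of credentials the controller validated as working.
func credentialsChanged(host *BareMetalHost, secret corev1.Secret) bool {
	return !host.Status.GoodCredentials.Match(secret)
}
```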
-// BareMetalHostStatus defines the observed state of BareMetalHost
-type BareMetalHostStatus struct {
-	// Important: Run "make generate manifests" to regenerate code
-	// after modifying this file
-
-	// OperationalStatus holds the status of the host
-	// +kubebuilder:validation:Enum="";OK;discovered;error;delayed;detached
-	OperationalStatus OperationalStatus `json:"operationalStatus"`
-
-	// ErrorType indicates the type of failure encountered when the
-	// OperationalStatus is OperationalStatusError
-	// +kubebuilder:validation:Enum=provisioned registration error;registration error;inspection error;preparation error;provisioning error;power management error
-	ErrorType ErrorType `json:"errorType,omitempty"`
-
-	// LastUpdated identifies when this status was last observed.
-	// +optional
-	LastUpdated *metav1.Time `json:"lastUpdated,omitempty"`
-
-	// The name of the profile matching the hardware details.
-	HardwareProfile string `json:"hardwareProfile"`
-
-	// The hardware discovered to exist on the host.
-	HardwareDetails *HardwareDetails `json:"hardware,omitempty"`
-
-	// Information tracked by the provisioner.
-	Provisioning ProvisionStatus `json:"provisioning"`
-
-	// The last credentials we were able to validate as working.
-	GoodCredentials CredentialsStatus `json:"goodCredentials,omitempty"`
-
-	// The last credentials we sent to the provisioning backend.
-	TriedCredentials CredentialsStatus `json:"triedCredentials,omitempty"`
-
-	// The last error message reported by the provisioning subsystem.
-	ErrorMessage string `json:"errorMessage"`
-
-	// Indicator for whether or not the host is powered on.
-	PoweredOn bool `json:"poweredOn"`
-
-	// OperationHistory holds information about operations performed
-	// on this host.
-	OperationHistory OperationHistory `json:"operationHistory,omitempty"`
-
-	// ErrorCount records how many times the host has encountered an error since the last successful operation
-	// +kubebuilder:default:=0
-	ErrorCount int `json:"errorCount"`
-}
-
-// ProvisionStatus holds the state information for a single target.
-type ProvisionStatus struct {
-	// An indicator for what the provisioner is doing with the host.
-	State ProvisioningState `json:"state"`
-
-	// The machine's UUID from the underlying provisioning tool
-	ID string `json:"ID"`
-
-	// Image holds the details of the last image successfully
-	// provisioned to the host.
-	Image Image `json:"image,omitempty"`
-
-	// The RootDeviceHints set by the user
-	RootDeviceHints *RootDeviceHints `json:"rootDeviceHints,omitempty"`
-
-	// BootMode indicates the boot mode used to provision the node
-	BootMode BootMode `json:"bootMode,omitempty"`
-
-	// The RAID configuration set by the user
-	RAID *RAIDConfig `json:"raid,omitempty"`
-
-	// The BIOS configuration set by the user
-	Firmware *FirmwareConfig `json:"firmware,omitempty"`
-
-	// Custom deploy procedure applied to the host.
-	CustomDeploy *CustomDeploy `json:"customDeploy,omitempty"`
-}
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// BareMetalHost is the Schema for the baremetalhosts API
-// +k8s:openapi-gen=true
-// +kubebuilder:resource:shortName=bmh;bmhost
-// +kubebuilder:subresource:status
-// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.operationalStatus",description="Operational status",priority=1
-// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.provisioning.state",description="Provisioning status"
-// +kubebuilder:printcolumn:name="Consumer",type="string",JSONPath=".spec.consumerRef.name",description="Consumer using this host"
-// +kubebuilder:printcolumn:name="BMC",type="string",JSONPath=".spec.bmc.address",description="Address of management controller",priority=1
-// +kubebuilder:printcolumn:name="Hardware_Profile",type="string",JSONPath=".status.hardwareProfile",description="The type of hardware detected",priority=1
-// +kubebuilder:printcolumn:name="Online",type="string",JSONPath=".spec.online",description="Whether the host is online or not"
-// +kubebuilder:printcolumn:name="Error",type="string",JSONPath=".status.errorType",description="Type of the most recent error"
-// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since creation of BareMetalHost"
-// +kubebuilder:object:root=true
-type BareMetalHost struct {
-	metav1.TypeMeta   `json:",inline"`
-	metav1.ObjectMeta `json:"metadata,omitempty"`
-
-	Spec   BareMetalHostSpec   `json:"spec,omitempty"`
-	Status BareMetalHostStatus `json:"status,omitempty"`
-}
-
-// BootMode returns the boot method to use for the host.
-func (host *BareMetalHost) BootMode() BootMode {
-	mode := host.Spec.BootMode
-	if mode == "" {
-		return DefaultBootMode
-	}
-	return mode
-}
-
-// setLabel updates the given label when necessary and returns true
-// when a change is made or false when no change is made.
-func (host *BareMetalHost) setLabel(name, value string) bool {
-	if host.Labels == nil {
-		host.Labels = make(map[string]string)
-	}
-	if host.Labels[name] != value {
-		host.Labels[name] = value
-		return true
-	}
-	return false
-}
-
-// getLabel returns the value associated with the given label. If
-// there is no value, an empty string is returned.
-func (host *BareMetalHost) getLabel(name string) string {
-	if host.Labels == nil {
-		return ""
-	}
-	return host.Labels[name]
-}
-
-// HasBMCDetails returns true if the BMC details are set
-func (host *BareMetalHost) HasBMCDetails() bool {
-	return host.Spec.BMC.Address != "" || host.Spec.BMC.CredentialsName != ""
-}
-
-// NeedsHardwareProfile returns true if the profile is not set
-func (host *BareMetalHost) NeedsHardwareProfile() bool {
-	return host.Status.HardwareProfile == ""
-}
-
-// HardwareProfile returns the hardware profile name for the host.
-func (host *BareMetalHost) HardwareProfile() string {
-	return host.Status.HardwareProfile
-}
-
-// SetHardwareProfile updates the hardware profile name and returns
-// true when a change is made or false when no change is made.
-func (host *BareMetalHost) SetHardwareProfile(name string) (dirty bool) {
-	if host.Status.HardwareProfile != name {
-		host.Status.HardwareProfile = name
-		dirty = true
-	}
-	return dirty
-}
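A usage sketch for the accessors above. The label helpers are unexported, so code like this would live in the same package; the label key is illustrative:

```go
package v1alpha1

import "fmt"

// exampleLabels exercises BootMode defaulting and the label helpers.
func exampleLabels(host *BareMetalHost) {
	fmt.Println(host.BootMode()) // "UEFI" when spec.bootMode is empty
	if host.setLabel("metal3.example/zone", "rack-1") {
		// Returned true: the label changed, so the object needs an update.
	}
	_ = host.getLabel("metal3.example/zone") // "rack-1"
}
```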
-// SetOperationalStatus updates the OperationalStatus field and returns
-// true when a change is made or false when no change is made.
-func (host *BareMetalHost) SetOperationalStatus(status OperationalStatus) bool {
-	if host.Status.OperationalStatus != status {
-		host.Status.OperationalStatus = status
-		return true
-	}
-	return false
-}
-
-// OperationalStatus returns the contents of the OperationalStatus
-// field.
-func (host *BareMetalHost) OperationalStatus() OperationalStatus {
-	return host.Status.OperationalStatus
-}
-
-// CredentialsKey returns a NamespacedName suitable for loading the
-// Secret containing the credentials associated with the host.
-func (host *BareMetalHost) CredentialsKey() types.NamespacedName {
-	return types.NamespacedName{
-		Name:      host.Spec.BMC.CredentialsName,
-		Namespace: host.ObjectMeta.Namespace,
-	}
-}
-
-// NeedsHardwareInspection looks at the state of the host to determine
-// if hardware inspection should be run.
-func (host *BareMetalHost) NeedsHardwareInspection() bool {
-	if host.Spec.ExternallyProvisioned {
-		// Never perform inspection if we already know something is
-		// using the host and we didn't provision it.
-		return false
-	}
-	if host.WasProvisioned() {
-		// Never perform inspection if we have already provisioned
-		// this host, because we don't want to reboot it.
-		return false
-	}
-	return host.Status.HardwareDetails == nil
-}
-
-// NeedsProvisioning compares the settings with the provisioning
-// status and returns true when more work is needed or false
-// otherwise.
-func (host *BareMetalHost) NeedsProvisioning() bool {
-	if !host.Spec.Online {
-		// The host is not supposed to be powered on.
-		return false
-	}
-
-	return host.hasNewImage() || host.hasNewCustomDeploy()
-}
-
-func (host *BareMetalHost) hasNewImage() bool {
-	if host.Spec.Image == nil {
-		// Without an image, there is nothing to provision.
-		return false
-	}
-	if host.Spec.Image.URL == "" {
-		// We have an Image struct but it is empty.
-		return false
-	}
-	if host.Status.Provisioning.Image.URL == "" {
-		// We have an image set, but it is not provisioned yet.
-		return true
-	}
-	return false
-}
-
-func (host *BareMetalHost) hasNewCustomDeploy() bool {
-	if host.Spec.CustomDeploy == nil {
-		return false
-	}
-	if host.Spec.CustomDeploy.Method == "" {
-		return false
-	}
-	if host.Status.Provisioning.CustomDeploy == nil {
-		return true
-	}
-	if host.Status.Provisioning.CustomDeploy.Method != host.Spec.CustomDeploy.Method {
-		return true
-	}
-	return false
-}
-
-// WasProvisioned returns true when we think we have placed an image
-// on the host.
-func (host *BareMetalHost) WasProvisioned() bool {
-	if host.Spec.ExternallyProvisioned {
-		return false
-	}
-	if host.Status.Provisioning.Image.URL != "" {
-		// We have an image provisioned.
-		return true
-	}
-	return false
-}
-
-// UpdateGoodCredentials modifies the GoodCredentials portion of the
-// Status struct to record the details of the secret containing
-// credentials known to work.
-func (host *BareMetalHost) UpdateGoodCredentials(currentSecret corev1.Secret) {
-	host.Status.GoodCredentials.Version = currentSecret.ObjectMeta.ResourceVersion
-	host.Status.GoodCredentials.Reference = &corev1.SecretReference{
-		Name:      currentSecret.ObjectMeta.Name,
-		Namespace: currentSecret.ObjectMeta.Namespace,
-	}
-}
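A decision sketch around the predicates above: a reconcile loop might combine them roughly like this. Illustrative only; the real controller logic lives in the baremetal-operator, not in this types package:

```go
package v1alpha1

// nextAction shows how the predicate helpers compose into a coarse
// reconcile decision. The returned strings are placeholders.
func nextAction(host *BareMetalHost) string {
	switch {
	case host.NeedsHardwareInspection():
		return "inspect"
	case host.NeedsProvisioning():
		return "provision"
	case host.WasProvisioned():
		return "steady-state"
	default:
		return "wait"
	}
}
```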
-// UpdateTriedCredentials modifies the TriedCredentials portion of the
-// Status struct to record the details of the secret containing the
-// credentials most recently sent to the provisioning backend.
-func (host *BareMetalHost) UpdateTriedCredentials(currentSecret corev1.Secret) {
-	host.Status.TriedCredentials.Version = currentSecret.ObjectMeta.ResourceVersion
-	host.Status.TriedCredentials.Reference = &corev1.SecretReference{
-		Name:      currentSecret.ObjectMeta.Name,
-		Namespace: currentSecret.ObjectMeta.Namespace,
-	}
-}
-
-// NewEvent creates a new event associated with the object and ready
-// to be published to the kubernetes API.
-func (host *BareMetalHost) NewEvent(reason, message string) corev1.Event {
-	t := metav1.Now()
-	return corev1.Event{
-		ObjectMeta: metav1.ObjectMeta{
-			GenerateName: reason + "-",
-			Namespace:    host.ObjectMeta.Namespace,
-		},
-		InvolvedObject: corev1.ObjectReference{
-			Kind:       "BareMetalHost",
-			Namespace:  host.Namespace,
-			Name:       host.Name,
-			UID:        host.UID,
-			APIVersion: GroupVersion.String(),
-		},
-		Reason:  reason,
-		Message: message,
-		Source: corev1.EventSource{
-			Component: "metal3-baremetal-controller",
-		},
-		FirstTimestamp:      t,
-		LastTimestamp:       t,
-		Count:               1,
-		Type:                corev1.EventTypeNormal,
-		ReportingController: "metal3.io/baremetal-controller",
-		Related:             host.Spec.ConsumerRef,
-	}
-}
-
-// OperationMetricForState returns a pointer to the metric for the given
-// provisioning state.
-func (host *BareMetalHost) OperationMetricForState(operation ProvisioningState) (metric *OperationMetric) {
-	history := &host.Status.OperationHistory
-	switch operation {
-	case StateRegistering:
-		metric = &history.Register
-	case StateInspecting:
-		metric = &history.Inspect
-	case StateProvisioning:
-		metric = &history.Provision
-	case StateDeprovisioning:
-		metric = &history.Deprovision
-	}
-	return
-}
-
-// GetChecksum returns the checksum of an image
-func (image *Image) GetChecksum() (checksum, checksumType string, ok bool) {
-	if image == nil {
-		return
-	}
-
-	if image.DiskFormat != nil && *image.DiskFormat == "live-iso" {
-		// Checksum is not required for live-iso
-		ok = true
-		return
-	}
-
-	if image.Checksum == "" {
-		// Return empty if checksum is not provided
-		return
-	}
-
-	switch image.ChecksumType {
-	case "":
-		checksumType = string(MD5)
-	case MD5, SHA256, SHA512:
-		checksumType = string(image.ChecksumType)
-	default:
-		return
-	}
-
-	checksum = image.Checksum
-	ok = true
-	return
-}
-
-// +kubebuilder:object:root=true
-
-// BareMetalHostList contains a list of BareMetalHost
-type BareMetalHostList struct {
-	metav1.TypeMeta `json:",inline"`
-	metav1.ListMeta `json:"metadata,omitempty"`
-	Items           []BareMetalHost `json:"items"`
-}
-
-func init() {
-	SchemeBuilder.Register(&BareMetalHost{}, &BareMetalHostList{})
-}
diff --git a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/baremetalhost_validation.go b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/baremetalhost_validation.go
deleted file mode 100644
index d8eda1424..000000000
--- a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/baremetalhost_validation.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package v1alpha1
-
-import (
-	"fmt"
-
-	logf "sigs.k8s.io/controller-runtime/pkg/log"
-
-	_ "github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc"
-)
-
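Returning to the operation-history helpers defined above (OperationMetricForState and OperationMetric.Duration), a usage sketch with illustrative timing values:

```go
package v1alpha1

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleMetrics records the start and end of a provisioning operation
// and reads back its duration. Timing values are illustrative.
func exampleMetrics(host *BareMetalHost) {
	metric := host.OperationMetricForState(StateProvisioning)
	metric.Start = metav1.Now()
	// ... provisioning happens; pretend it took five minutes ...
	metric.End = metav1.NewTime(metric.Start.Add(5 * time.Minute))
	fmt.Println(metric.Duration()) // 5m0s
}
```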
-// log is for logging in this package.
-var log = logf.Log.WithName("baremetalhost-validation")
-
-// validateHost validates the BareMetalHost resource for creation
-func (host *BareMetalHost) validateHost() []error {
-	log.Info("validate create", "name", host.Name)
-	var errs []error
-
-	if err := validateRAID(host.Spec.RAID); err != nil {
-		errs = append(errs, err)
-	}
-
-	return errs
-}
-
-// validateChanges validates the BareMetalHost resource on updates; it
-// also covers the validations performed at creation
-func (host *BareMetalHost) validateChanges(old *BareMetalHost) []error {
-	log.Info("validate update", "name", host.Name)
-	var errs []error
-
-	if err := host.validateHost(); err != nil {
-		errs = append(errs, err...)
-	}
-
-	if old.Spec.BMC.Address != "" && host.Spec.BMC.Address != old.Spec.BMC.Address {
-		errs = append(errs, fmt.Errorf("BMC address cannot be changed once it is set"))
-	}
-
-	if old.Spec.BootMACAddress != "" && host.Spec.BootMACAddress != old.Spec.BootMACAddress {
-		errs = append(errs, fmt.Errorf("bootMACAddress cannot be changed once it is set"))
-	}
-
-	return errs
-}
-
-func validateRAID(r *RAIDConfig) error {
-	if r == nil {
-		return nil
-	}
-
-	if len(r.HardwareRAIDVolumes) > 0 && len(r.SoftwareRAIDVolumes) > 0 {
-		return fmt.Errorf("hardwareRAIDVolumes and softwareRAIDVolumes cannot be set at the same time")
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/baremetalhost_webhook.go b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/baremetalhost_webhook.go
deleted file mode 100644
index b091100a8..000000000
--- a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/baremetalhost_webhook.go
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-    http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
-	"fmt"
-
-	"k8s.io/apimachinery/pkg/util/errors"
-
-	"k8s.io/apimachinery/pkg/runtime"
-	ctrl "sigs.k8s.io/controller-runtime"
-	logf "sigs.k8s.io/controller-runtime/pkg/log"
-	"sigs.k8s.io/controller-runtime/pkg/webhook"
-)
-
-// log is for logging in this package.
-var baremetalhostlog = logf.Log.WithName("baremetalhost-resource")
-
-func (r *BareMetalHost) SetupWebhookWithManager(mgr ctrl.Manager) error {
-	return ctrl.NewWebhookManagedBy(mgr).
-		For(r).
-		Complete()
-}
-
-//+kubebuilder:webhook:verbs=create;update,path=/validate-metal3-io-v1alpha1-baremetalhost,mutating=false,failurePolicy=fail,sideEffects=none,admissionReviewVersions=v1;v1beta,groups=metal3.io,resources=baremetalhosts,versions=v1alpha1,name=baremetalhost.metal3.io
-
-var _ webhook.Validator = &BareMetalHost{}
-
-// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
-func (r *BareMetalHost) ValidateCreate() error {
-	baremetalhostlog.Info("validate create", "name", r.Name)
-	return errors.NewAggregate(r.validateHost())
-}
-
-// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
-func (r *BareMetalHost) ValidateUpdate(old runtime.Object) error {
-	baremetalhostlog.Info("validate update", "name", r.Name)
-	bmh, casted := old.(*BareMetalHost)
-	if !casted {
-		baremetalhostlog.Error(fmt.Errorf("old object conversion error"), "validate update error")
-		return nil
-	}
-	return errors.NewAggregate(r.validateChanges(bmh))
-}
-
-// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
-func (r *BareMetalHost) ValidateDelete() error {
-	return nil
-}
diff --git a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/firmwareschema_types.go b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/firmwareschema_types.go
deleted file mode 100644
index df76ff5da..000000000
--- a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/firmwareschema_types.go
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
-
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
-	"fmt"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/intstr"
-)
-
-// SettingSchema holds additional data describing a firmware setting
-type SettingSchema struct {
-
-	// The type of setting.
-	// +kubebuilder:validation:Enum=Enumeration;String;Integer;Boolean;Password
-	AttributeType string `json:"attribute_type,omitempty"`
-
-	// The allowable values for an Enumeration type setting.
-	AllowableValues []string `json:"allowable_values,omitempty"`
-
-	// The lowest value for an Integer type setting.
-	LowerBound *int `json:"lower_bound,omitempty"`
-
-	// The highest value for an Integer type setting.
-	UpperBound *int `json:"upper_bound,omitempty"`
-
-	// Minimum length for a String type setting.
-	MinLength *int `json:"min_length,omitempty"`
-
-	// Maximum length for a String type setting.
-	MaxLength *int `json:"max_length,omitempty"`
-
-	// Whether or not this setting is read only.
-	ReadOnly *bool `json:"read_only,omitempty"`
-
-	// Whether or not a reset is required after changing this setting.
-	ResetRequired *bool `json:"reset_required,omitempty"`
-
-	// Whether or not this setting's value is unique to this node, e.g.
-	// a serial number.
-	Unique *bool `json:"unique,omitempty"`
-}
-
-type SchemaSettingError struct {
-	name    string
-	message string
-}
-
-func (e SchemaSettingError) Error() string {
-	return fmt.Sprintf("Setting %s is invalid, %s", e.name, e.message)
-}
-
-func (schema *SettingSchema) Validate(name string, value intstr.IntOrString) error {
-
-	if schema.ReadOnly != nil && *schema.ReadOnly {
-		return SchemaSettingError{name: name, message: "it is ReadOnly"}
-	}
-
-	// Check if valid based on type
-	switch schema.AttributeType {
-	case "Enumeration":
-		for _, av := range schema.AllowableValues {
-			if value.String() == av {
-				return nil
-			}
-		}
-		return SchemaSettingError{name: name, message: fmt.Sprintf("unknown enumeration value - %s", value.String())}
-
-	case "Integer":
-		if schema.LowerBound != nil && value.IntValue() < *schema.LowerBound {
-			return SchemaSettingError{name: name, message: fmt.Sprintf("integer %s is below minimum value %d", value.String(), *schema.LowerBound)}
-		}
-		if schema.UpperBound != nil && value.IntValue() > *schema.UpperBound {
-			return SchemaSettingError{name: name, message: fmt.Sprintf("integer %s is above maximum value %d", value.String(), *schema.UpperBound)}
-		}
-		return nil
-
-	case "String":
-		strLen := len(value.String())
-		if schema.MinLength != nil && strLen < *schema.MinLength {
-			return SchemaSettingError{name: name, message: fmt.Sprintf("string %s length is below minimum length %d", value.String(), *schema.MinLength)}
-		}
-		if schema.MaxLength != nil && strLen > *schema.MaxLength {
-			return SchemaSettingError{name: name, message: fmt.Sprintf("string %s length is above maximum length %d", value.String(), *schema.MaxLength)}
-		}
-		return nil
-
-	case "Boolean":
-		if value.String() == "true" || value.String() == "false" {
-			return nil
-		}
-		return SchemaSettingError{name: name, message: fmt.Sprintf("%s is not a boolean", value.String())}
-
-	case "Password":
-		// Prevent setting password-type attributes
-		return SchemaSettingError{name: name, message: "passwords are immutable"}
-
-	case "":
-		// Allow the set, as BIOS registry fields may not have been available
-		return nil
-
-	default:
-		// Unexpected attribute type
-		return SchemaSettingError{name: name, message: fmt.Sprintf("unexpected attribute type %s", schema.AttributeType)}
-	}
-}
-
-// FirmwareSchemaSpec defines the desired state of FirmwareSchema
-type FirmwareSchemaSpec struct {
-
-	// The hardware vendor associated with this schema
-	// +optional
-	HardwareVendor string `json:"hardwareVendor,omitempty"`
-
-	// The hardware model associated with this schema
-	// +optional
-	HardwareModel string `json:"hardwareModel,omitempty"`
-
-	// Map of firmware name to schema
-	Schema map[string]SettingSchema `json:"schema" required:"true"`
-}
-
-//+kubebuilder:object:root=true
-
-// FirmwareSchema is the Schema for the firmwareschemas API
-type FirmwareSchema struct {
-	metav1.TypeMeta   `json:",inline"`
-	metav1.ObjectMeta `json:"metadata,omitempty"`
-
-	Spec FirmwareSchemaSpec `json:"spec,omitempty"`
-}
-
-// ValidateSetting checks whether the setting's name and value are valid using the schema
-func (host *FirmwareSchema) ValidateSetting(name string, value intstr.IntOrString, schemas map[string]SettingSchema) error {
-
-	schema, ok := schemas[name]
-	if !ok {
-		return SchemaSettingError{name: name, message: "it is not in the associated schema"}
-	}
-
-	return schema.Validate(name, value)
-}
-
-//+kubebuilder:object:root=true
-
-// FirmwareSchemaList contains a list of FirmwareSchema
-type FirmwareSchemaList struct {
-	metav1.TypeMeta `json:",inline"`
-	metav1.ListMeta `json:"metadata,omitempty"`
-	Items           []FirmwareSchema `json:"items"`
-}
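A usage sketch for SettingSchema.Validate with an Integer setting. The schema bounds and the setting name are illustrative:

```go
package v1alpha1

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

// exampleValidate shows an out-of-range Integer setting being rejected.
func exampleValidate() {
	lower, upper := 0, 4
	schema := SettingSchema{
		AttributeType: "Integer",
		LowerBound:    &lower,
		UpperBound:    &upper,
	}
	err := schema.Validate("BootRetryCount", intstr.FromInt(9))
	// Prints: Setting BootRetryCount is invalid, integer 9 is above maximum value 4
	fmt.Println(err)
}
```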
`json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []FirmwareSchema `json:"items"` -} - -func init() { - SchemeBuilder.Register(&FirmwareSchema{}, &FirmwareSchemaList{}) -} diff --git a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/groupversion_info.go b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/groupversion_info.go deleted file mode 100644 index eb8116d7c..000000000 --- a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/groupversion_info.go +++ /dev/null @@ -1,35 +0,0 @@ -/* - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package v1alpha1 contains API Schema definitions for the metal3.io v1alpha1 API group -//+kubebuilder:object:generate=true -//+groupName=metal3.io -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/scheme" -) - -var ( - // GroupVersion is group version used to register these objects - GroupVersion = schema.GroupVersion{Group: "metal3.io", Version: "v1alpha1"} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} - - // AddToScheme adds the types in this group-version to the given scheme. - AddToScheme = SchemeBuilder.AddToScheme -) diff --git a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/hostfirmwaresettings_types.go b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/hostfirmwaresettings_types.go deleted file mode 100644 index 8bca68d14..000000000 --- a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/hostfirmwaresettings_types.go +++ /dev/null @@ -1,100 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" -) - -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. - -type SettingsMap map[string]string -type DesiredSettingsMap map[string]intstr.IntOrString - -type SchemaReference struct { - // `namespace` is the namespace of the where the schema is stored. - Namespace string `json:"namespace"` - // `name` is the reference to the schema. 
-
-type SettingsConditionType string
-
-const (
-	// Indicates that the settings in the Spec are different from the Status
-	UpdateRequested SettingsConditionType = "UpdateRequested"
-
-	// Indicates whether the settings are valid and can be configured on the host
-	SettingsValid SettingsConditionType = "Valid"
-)
-
-// HostFirmwareSettingsSpec defines the desired state of HostFirmwareSettings
-type HostFirmwareSettingsSpec struct {
-
-	// Settings are the desired firmware settings stored as name/value pairs.
-	// This will be populated with the actual firmware settings and only
-	// contain the settings that can be modified (i.e. not ReadOnly), to
-	// facilitate making changes.
-	// +patchStrategy=merge
-	Settings DesiredSettingsMap `json:"settings" required:"true"`
-}
-
-// HostFirmwareSettingsStatus defines the observed state of HostFirmwareSettings
-type HostFirmwareSettingsStatus struct {
-	// FirmwareSchema is a reference to the Schema used to describe each
-	// FirmwareSetting. By default, this will be a Schema in the same
-	// Namespace as the settings but it can be overwritten in the Spec
-	FirmwareSchema *SchemaReference `json:"schema,omitempty"`
-
-	// Settings are the actual firmware settings stored as name/value pairs
-	Settings SettingsMap `json:"settings" required:"true"`
-
-	// Track whether settings stored in the spec are valid based on the schema
-	// +patchMergeKey=type
-	// +patchStrategy=merge
-	// +listType=map
-	// +listMapKey=type
-	// +optional
-	Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
-}
-
-//+kubebuilder:object:root=true
-//+kubebuilder:resource:shortName=hfs
-//+kubebuilder:subresource:status
-
-// HostFirmwareSettings is the Schema for the hostfirmwaresettings API
-type HostFirmwareSettings struct {
-	metav1.TypeMeta   `json:",inline"`
-	metav1.ObjectMeta `json:"metadata,omitempty"`
-
-	Spec   HostFirmwareSettingsSpec   `json:"spec,omitempty"`
-	Status HostFirmwareSettingsStatus `json:"status,omitempty"`
-}
-
-//+kubebuilder:object:root=true
-
-// HostFirmwareSettingsList contains a list of HostFirmwareSettings
-type HostFirmwareSettingsList struct {
-	metav1.TypeMeta `json:",inline"`
-	metav1.ListMeta `json:"metadata,omitempty"`
-	Items           []HostFirmwareSettings `json:"items"`
-}
-
-func init() {
-	SchemeBuilder.Register(&HostFirmwareSettings{}, &HostFirmwareSettingsList{})
-}
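A sketch of how a controller might derive the UpdateRequested condition above: compare the desired Spec.Settings against the actual Status.Settings. Illustrative only; the real reconciliation lives in the baremetal-operator:

```go
package v1alpha1

// updateRequested reports whether any desired setting differs from the
// actual value read back from the host.
func updateRequested(hfs *HostFirmwareSettings) bool {
	for name, want := range hfs.Spec.Settings {
		got, ok := hfs.Status.Settings[name]
		if !ok || got != want.String() {
			return true // a change still needs to be applied
		}
	}
	return false
}
```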
diff --git a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/preprovisioningimage_types.go b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/preprovisioningimage_types.go
deleted file mode 100644
index 8853a5e38..000000000
--- a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/preprovisioningimage_types.go
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// ImageFormat enumerates the allowed image formats
-// +kubebuilder:validation:Enum=iso;initrd
-type ImageFormat string
-
-const PreprovisioningImageFinalizer = "preprovisioningimage.metal3.io"
-
-const (
-	ImageFormatISO    ImageFormat = "iso"
-	ImageFormatInitRD ImageFormat = "initrd"
-)
-
-// PreprovisioningImageSpec defines the desired state of PreprovisioningImage
-type PreprovisioningImageSpec struct {
-	// networkDataName is the name of a Secret in the local namespace that
-	// contains network data to build into the image.
-	// +optional
-	NetworkDataName string `json:"networkDataName,omitempty"`
-
-	// architecture is the processor architecture for which to build the image.
-	// +optional
-	Architecture string `json:"architecture,omitempty"`
-
-	// acceptFormats is a list of acceptable image formats.
-	// +optional
-	AcceptFormats []ImageFormat `json:"acceptFormats,omitempty"`
-}
-
-type SecretStatus struct {
-	Name    string `json:"name,omitempty"`
-	Version string `json:"version,omitempty"`
-}
-
-type ImageStatusConditionType string
-
-const (
-	// Ready indicates that the Image is available and ready to be downloaded.
-	ConditionImageReady ImageStatusConditionType = "Ready"
-
-	// Error indicates that the operator was unable to build an image.
-	ConditionImageError ImageStatusConditionType = "Error"
-)
-
-// PreprovisioningImageStatus defines the observed state of PreprovisioningImage
-type PreprovisioningImageStatus struct {
-	// imageUrl is the URL from which the built image can be downloaded.
-	ImageUrl string `json:"imageUrl,omitempty"`
-
-	// format is the type of image that is available at the download url:
-	// either iso or initrd.
-	// +optional
-	Format ImageFormat `json:"format,omitempty"`
-
-	// networkData is a reference to the version of the Secret containing the
-	// network data used to build the image.
- // +optional - NetworkData SecretStatus `json:"networkData,omitempty"` - - // architecture is the processor architecture for which the image is built - Architecture string `json:"architecture,omitempty"` - - // conditions describe the state of the built image - // +patchMergeKey=type - // +patchStrategy=merge - // +listType=map - // +listMapKey=type - // +optional - Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` -} - -// +kubebuilder:object:root=true -// +kubebuilder:resource:shortName=ppimg -// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status",description="Whether the image is ready" -// +kubebuilder:printcolumn:name="Reason",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].reason",description="The reason for the image readiness status" -// +kubebuilder:subresource:status - -// PreprovisioningImage is the Schema for the preprovisioningimages API -type PreprovisioningImage struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec PreprovisioningImageSpec `json:"spec,omitempty"` - Status PreprovisioningImageStatus `json:"status,omitempty"` -} - -// +kubebuilder:object:root=true - -// PreprovisioningImageList contains a list of PreprovisioningImage -type PreprovisioningImageList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []PreprovisioningImage `json:"items"` -} - -func init() { - SchemeBuilder.Register(&PreprovisioningImage{}, &PreprovisioningImageList{}) -} diff --git a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 0891f4eac..000000000 --- a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,1060 +0,0 @@ -// +build !ignore_autogenerated - -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by controller-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BIOS) DeepCopyInto(out *BIOS) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BIOS. -func (in *BIOS) DeepCopy() *BIOS { - if in == nil { - return nil - } - out := new(BIOS) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BMCDetails) DeepCopyInto(out *BMCDetails) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BMCDetails. 
-func (in *BMCDetails) DeepCopy() *BMCDetails { - if in == nil { - return nil - } - out := new(BMCDetails) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BareMetalHost) DeepCopyInto(out *BareMetalHost) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalHost. -func (in *BareMetalHost) DeepCopy() *BareMetalHost { - if in == nil { - return nil - } - out := new(BareMetalHost) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *BareMetalHost) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BareMetalHostList) DeepCopyInto(out *BareMetalHostList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]BareMetalHost, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalHostList. -func (in *BareMetalHostList) DeepCopy() *BareMetalHostList { - if in == nil { - return nil - } - out := new(BareMetalHostList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *BareMetalHostList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *BareMetalHostSpec) DeepCopyInto(out *BareMetalHostSpec) { - *out = *in - if in.Taints != nil { - in, out := &in.Taints, &out.Taints - *out = make([]v1.Taint, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - out.BMC = in.BMC - if in.RAID != nil { - in, out := &in.RAID, &out.RAID - *out = new(RAIDConfig) - (*in).DeepCopyInto(*out) - } - if in.Firmware != nil { - in, out := &in.Firmware, &out.Firmware - *out = new(FirmwareConfig) - (*in).DeepCopyInto(*out) - } - if in.RootDeviceHints != nil { - in, out := &in.RootDeviceHints, &out.RootDeviceHints - *out = new(RootDeviceHints) - (*in).DeepCopyInto(*out) - } - if in.ConsumerRef != nil { - in, out := &in.ConsumerRef, &out.ConsumerRef - *out = new(v1.ObjectReference) - **out = **in - } - if in.Image != nil { - in, out := &in.Image, &out.Image - *out = new(Image) - (*in).DeepCopyInto(*out) - } - if in.UserData != nil { - in, out := &in.UserData, &out.UserData - *out = new(v1.SecretReference) - **out = **in - } - if in.NetworkData != nil { - in, out := &in.NetworkData, &out.NetworkData - *out = new(v1.SecretReference) - **out = **in - } - if in.MetaData != nil { - in, out := &in.MetaData, &out.MetaData - *out = new(v1.SecretReference) - **out = **in - } - if in.CustomDeploy != nil { - in, out := &in.CustomDeploy, &out.CustomDeploy - *out = new(CustomDeploy) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalHostSpec. -func (in *BareMetalHostSpec) DeepCopy() *BareMetalHostSpec { - if in == nil { - return nil - } - out := new(BareMetalHostSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BareMetalHostStatus) DeepCopyInto(out *BareMetalHostStatus) { - *out = *in - if in.LastUpdated != nil { - in, out := &in.LastUpdated, &out.LastUpdated - *out = (*in).DeepCopy() - } - if in.HardwareDetails != nil { - in, out := &in.HardwareDetails, &out.HardwareDetails - *out = new(HardwareDetails) - (*in).DeepCopyInto(*out) - } - in.Provisioning.DeepCopyInto(&out.Provisioning) - in.GoodCredentials.DeepCopyInto(&out.GoodCredentials) - in.TriedCredentials.DeepCopyInto(&out.TriedCredentials) - in.OperationHistory.DeepCopyInto(&out.OperationHistory) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalHostStatus. -func (in *BareMetalHostStatus) DeepCopy() *BareMetalHostStatus { - if in == nil { - return nil - } - out := new(BareMetalHostStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CPU) DeepCopyInto(out *CPU) { - *out = *in - if in.Flags != nil { - in, out := &in.Flags, &out.Flags - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPU. -func (in *CPU) DeepCopy() *CPU { - if in == nil { - return nil - } - out := new(CPU) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CredentialsStatus) DeepCopyInto(out *CredentialsStatus) { - *out = *in - if in.Reference != nil { - in, out := &in.Reference, &out.Reference - *out = new(v1.SecretReference) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CredentialsStatus. -func (in *CredentialsStatus) DeepCopy() *CredentialsStatus { - if in == nil { - return nil - } - out := new(CredentialsStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomDeploy) DeepCopyInto(out *CustomDeploy) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDeploy. -func (in *CustomDeploy) DeepCopy() *CustomDeploy { - if in == nil { - return nil - } - out := new(CustomDeploy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in DesiredSettingsMap) DeepCopyInto(out *DesiredSettingsMap) { - { - in := &in - *out = make(DesiredSettingsMap, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DesiredSettingsMap. -func (in DesiredSettingsMap) DeepCopy() DesiredSettingsMap { - if in == nil { - return nil - } - out := new(DesiredSettingsMap) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Firmware) DeepCopyInto(out *Firmware) { - *out = *in - out.BIOS = in.BIOS -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Firmware. -func (in *Firmware) DeepCopy() *Firmware { - if in == nil { - return nil - } - out := new(Firmware) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FirmwareConfig) DeepCopyInto(out *FirmwareConfig) { - *out = *in - if in.VirtualizationEnabled != nil { - in, out := &in.VirtualizationEnabled, &out.VirtualizationEnabled - *out = new(bool) - **out = **in - } - if in.SimultaneousMultithreadingEnabled != nil { - in, out := &in.SimultaneousMultithreadingEnabled, &out.SimultaneousMultithreadingEnabled - *out = new(bool) - **out = **in - } - if in.SriovEnabled != nil { - in, out := &in.SriovEnabled, &out.SriovEnabled - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirmwareConfig. -func (in *FirmwareConfig) DeepCopy() *FirmwareConfig { - if in == nil { - return nil - } - out := new(FirmwareConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FirmwareSchema) DeepCopyInto(out *FirmwareSchema) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirmwareSchema. 
-func (in *FirmwareSchema) DeepCopy() *FirmwareSchema { - if in == nil { - return nil - } - out := new(FirmwareSchema) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *FirmwareSchema) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FirmwareSchemaList) DeepCopyInto(out *FirmwareSchemaList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]FirmwareSchema, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirmwareSchemaList. -func (in *FirmwareSchemaList) DeepCopy() *FirmwareSchemaList { - if in == nil { - return nil - } - out := new(FirmwareSchemaList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *FirmwareSchemaList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FirmwareSchemaSpec) DeepCopyInto(out *FirmwareSchemaSpec) { - *out = *in - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = make(map[string]SettingSchema, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirmwareSchemaSpec. -func (in *FirmwareSchemaSpec) DeepCopy() *FirmwareSchemaSpec { - if in == nil { - return nil - } - out := new(FirmwareSchemaSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HardwareDetails) DeepCopyInto(out *HardwareDetails) { - *out = *in - out.SystemVendor = in.SystemVendor - out.Firmware = in.Firmware - if in.NIC != nil { - in, out := &in.NIC, &out.NIC - *out = make([]NIC, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Storage != nil { - in, out := &in.Storage, &out.Storage - *out = make([]Storage, len(*in)) - copy(*out, *in) - } - in.CPU.DeepCopyInto(&out.CPU) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HardwareDetails. -func (in *HardwareDetails) DeepCopy() *HardwareDetails { - if in == nil { - return nil - } - out := new(HardwareDetails) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *HardwareRAIDVolume) DeepCopyInto(out *HardwareRAIDVolume) { - *out = *in - if in.SizeGibibytes != nil { - in, out := &in.SizeGibibytes, &out.SizeGibibytes - *out = new(int) - **out = **in - } - if in.Rotational != nil { - in, out := &in.Rotational, &out.Rotational - *out = new(bool) - **out = **in - } - if in.NumberOfPhysicalDisks != nil { - in, out := &in.NumberOfPhysicalDisks, &out.NumberOfPhysicalDisks - *out = new(int) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HardwareRAIDVolume. -func (in *HardwareRAIDVolume) DeepCopy() *HardwareRAIDVolume { - if in == nil { - return nil - } - out := new(HardwareRAIDVolume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HardwareSystemVendor) DeepCopyInto(out *HardwareSystemVendor) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HardwareSystemVendor. -func (in *HardwareSystemVendor) DeepCopy() *HardwareSystemVendor { - if in == nil { - return nil - } - out := new(HardwareSystemVendor) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostFirmwareSettings) DeepCopyInto(out *HostFirmwareSettings) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostFirmwareSettings. -func (in *HostFirmwareSettings) DeepCopy() *HostFirmwareSettings { - if in == nil { - return nil - } - out := new(HostFirmwareSettings) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *HostFirmwareSettings) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostFirmwareSettingsList) DeepCopyInto(out *HostFirmwareSettingsList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]HostFirmwareSettings, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostFirmwareSettingsList. -func (in *HostFirmwareSettingsList) DeepCopy() *HostFirmwareSettingsList { - if in == nil { - return nil - } - out := new(HostFirmwareSettingsList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *HostFirmwareSettingsList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *HostFirmwareSettingsSpec) DeepCopyInto(out *HostFirmwareSettingsSpec) { - *out = *in - if in.Settings != nil { - in, out := &in.Settings, &out.Settings - *out = make(DesiredSettingsMap, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostFirmwareSettingsSpec. -func (in *HostFirmwareSettingsSpec) DeepCopy() *HostFirmwareSettingsSpec { - if in == nil { - return nil - } - out := new(HostFirmwareSettingsSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostFirmwareSettingsStatus) DeepCopyInto(out *HostFirmwareSettingsStatus) { - *out = *in - if in.FirmwareSchema != nil { - in, out := &in.FirmwareSchema, &out.FirmwareSchema - *out = new(SchemaReference) - **out = **in - } - if in.Settings != nil { - in, out := &in.Settings, &out.Settings - *out = make(SettingsMap, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]metav1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostFirmwareSettingsStatus. -func (in *HostFirmwareSettingsStatus) DeepCopy() *HostFirmwareSettingsStatus { - if in == nil { - return nil - } - out := new(HostFirmwareSettingsStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Image) DeepCopyInto(out *Image) { - *out = *in - if in.DiskFormat != nil { - in, out := &in.DiskFormat, &out.DiskFormat - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. -func (in *Image) DeepCopy() *Image { - if in == nil { - return nil - } - out := new(Image) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NIC) DeepCopyInto(out *NIC) { - *out = *in - if in.VLANs != nil { - in, out := &in.VLANs, &out.VLANs - *out = make([]VLAN, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NIC. -func (in *NIC) DeepCopy() *NIC { - if in == nil { - return nil - } - out := new(NIC) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OperationHistory) DeepCopyInto(out *OperationHistory) { - *out = *in - in.Register.DeepCopyInto(&out.Register) - in.Inspect.DeepCopyInto(&out.Inspect) - in.Provision.DeepCopyInto(&out.Provision) - in.Deprovision.DeepCopyInto(&out.Deprovision) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperationHistory. -func (in *OperationHistory) DeepCopy() *OperationHistory { - if in == nil { - return nil - } - out := new(OperationHistory) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *OperationMetric) DeepCopyInto(out *OperationMetric) { - *out = *in - in.Start.DeepCopyInto(&out.Start) - in.End.DeepCopyInto(&out.End) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperationMetric. -func (in *OperationMetric) DeepCopy() *OperationMetric { - if in == nil { - return nil - } - out := new(OperationMetric) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PreprovisioningImage) DeepCopyInto(out *PreprovisioningImage) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreprovisioningImage. -func (in *PreprovisioningImage) DeepCopy() *PreprovisioningImage { - if in == nil { - return nil - } - out := new(PreprovisioningImage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PreprovisioningImage) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PreprovisioningImageList) DeepCopyInto(out *PreprovisioningImageList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PreprovisioningImage, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreprovisioningImageList. -func (in *PreprovisioningImageList) DeepCopy() *PreprovisioningImageList { - if in == nil { - return nil - } - out := new(PreprovisioningImageList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PreprovisioningImageList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PreprovisioningImageSpec) DeepCopyInto(out *PreprovisioningImageSpec) { - *out = *in - if in.AcceptFormats != nil { - in, out := &in.AcceptFormats, &out.AcceptFormats - *out = make([]ImageFormat, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreprovisioningImageSpec. -func (in *PreprovisioningImageSpec) DeepCopy() *PreprovisioningImageSpec { - if in == nil { - return nil - } - out := new(PreprovisioningImageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PreprovisioningImageStatus) DeepCopyInto(out *PreprovisioningImageStatus) { - *out = *in - out.NetworkData = in.NetworkData - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]metav1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreprovisioningImageStatus. 
-func (in *PreprovisioningImageStatus) DeepCopy() *PreprovisioningImageStatus { - if in == nil { - return nil - } - out := new(PreprovisioningImageStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProvisionStatus) DeepCopyInto(out *ProvisionStatus) { - *out = *in - in.Image.DeepCopyInto(&out.Image) - if in.RootDeviceHints != nil { - in, out := &in.RootDeviceHints, &out.RootDeviceHints - *out = new(RootDeviceHints) - (*in).DeepCopyInto(*out) - } - if in.RAID != nil { - in, out := &in.RAID, &out.RAID - *out = new(RAIDConfig) - (*in).DeepCopyInto(*out) - } - if in.Firmware != nil { - in, out := &in.Firmware, &out.Firmware - *out = new(FirmwareConfig) - (*in).DeepCopyInto(*out) - } - if in.CustomDeploy != nil { - in, out := &in.CustomDeploy, &out.CustomDeploy - *out = new(CustomDeploy) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisionStatus. -func (in *ProvisionStatus) DeepCopy() *ProvisionStatus { - if in == nil { - return nil - } - out := new(ProvisionStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RAIDConfig) DeepCopyInto(out *RAIDConfig) { - *out = *in - if in.HardwareRAIDVolumes != nil { - in, out := &in.HardwareRAIDVolumes, &out.HardwareRAIDVolumes - *out = make([]HardwareRAIDVolume, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.SoftwareRAIDVolumes != nil { - in, out := &in.SoftwareRAIDVolumes, &out.SoftwareRAIDVolumes - *out = make([]SoftwareRAIDVolume, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RAIDConfig. -func (in *RAIDConfig) DeepCopy() *RAIDConfig { - if in == nil { - return nil - } - out := new(RAIDConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RebootAnnotationArguments) DeepCopyInto(out *RebootAnnotationArguments) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RebootAnnotationArguments. -func (in *RebootAnnotationArguments) DeepCopy() *RebootAnnotationArguments { - if in == nil { - return nil - } - out := new(RebootAnnotationArguments) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RootDeviceHints) DeepCopyInto(out *RootDeviceHints) { - *out = *in - if in.Rotational != nil { - in, out := &in.Rotational, &out.Rotational - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootDeviceHints. -func (in *RootDeviceHints) DeepCopy() *RootDeviceHints { - if in == nil { - return nil - } - out := new(RootDeviceHints) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SchemaReference) DeepCopyInto(out *SchemaReference) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaReference. 
-func (in *SchemaReference) DeepCopy() *SchemaReference { - if in == nil { - return nil - } - out := new(SchemaReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SchemaSettingError) DeepCopyInto(out *SchemaSettingError) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaSettingError. -func (in *SchemaSettingError) DeepCopy() *SchemaSettingError { - if in == nil { - return nil - } - out := new(SchemaSettingError) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretStatus) DeepCopyInto(out *SecretStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretStatus. -func (in *SecretStatus) DeepCopy() *SecretStatus { - if in == nil { - return nil - } - out := new(SecretStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SettingSchema) DeepCopyInto(out *SettingSchema) { - *out = *in - if in.AllowableValues != nil { - in, out := &in.AllowableValues, &out.AllowableValues - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.LowerBound != nil { - in, out := &in.LowerBound, &out.LowerBound - *out = new(int) - **out = **in - } - if in.UpperBound != nil { - in, out := &in.UpperBound, &out.UpperBound - *out = new(int) - **out = **in - } - if in.MinLength != nil { - in, out := &in.MinLength, &out.MinLength - *out = new(int) - **out = **in - } - if in.MaxLength != nil { - in, out := &in.MaxLength, &out.MaxLength - *out = new(int) - **out = **in - } - if in.ReadOnly != nil { - in, out := &in.ReadOnly, &out.ReadOnly - *out = new(bool) - **out = **in - } - if in.ResetRequired != nil { - in, out := &in.ResetRequired, &out.ResetRequired - *out = new(bool) - **out = **in - } - if in.Unique != nil { - in, out := &in.Unique, &out.Unique - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingSchema. -func (in *SettingSchema) DeepCopy() *SettingSchema { - if in == nil { - return nil - } - out := new(SettingSchema) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in SettingsMap) DeepCopyInto(out *SettingsMap) { - { - in := &in - *out = make(SettingsMap, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsMap. -func (in SettingsMap) DeepCopy() SettingsMap { - if in == nil { - return nil - } - out := new(SettingsMap) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *SoftwareRAIDVolume) DeepCopyInto(out *SoftwareRAIDVolume) { - *out = *in - if in.SizeGibibytes != nil { - in, out := &in.SizeGibibytes, &out.SizeGibibytes - *out = new(int) - **out = **in - } - if in.PhysicalDisks != nil { - in, out := &in.PhysicalDisks, &out.PhysicalDisks - *out = make([]RootDeviceHints, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SoftwareRAIDVolume. -func (in *SoftwareRAIDVolume) DeepCopy() *SoftwareRAIDVolume { - if in == nil { - return nil - } - out := new(SoftwareRAIDVolume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Storage) DeepCopyInto(out *Storage) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage. -func (in *Storage) DeepCopy() *Storage { - if in == nil { - return nil - } - out := new(Storage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VLAN) DeepCopyInto(out *VLAN) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VLAN. -func (in *VLAN) DeepCopy() *VLAN { - if in == nil { - return nil - } - out := new(VLAN) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/LICENSE b/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/access.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/access.go deleted file mode 100644 index 751fcf8ff..000000000 --- a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/access.go +++ /dev/null @@ -1,152 +0,0 @@ -package bmc - -import ( - "fmt" - "net" - "net/url" - "strings" - - "github.com/pkg/errors" -) - -// AccessDetailsFactory describes a callable that returns a new -// AccessDetails based on the input parameters. -type AccessDetailsFactory func(parsedURL *url.URL, disableCertificateVerification bool) (AccessDetails, error) - -var factories = map[string]AccessDetailsFactory{} - -// RegisterFactory maps a BMC type name to an AccessDetailsFactory, -// with optional scheme extensions. 
-// -// RegisterFactory("bmcname", theFunc, []string{"http", "https"}) -// maps "bmcname", "bmcname+http", and "bmcname+https" to theFunc -func RegisterFactory(name string, factory AccessDetailsFactory, schemes []string) { - factories[name] = factory - - for _, scheme := range schemes { - factories[fmt.Sprintf("%s+%s", name, scheme)] = factory - } -} - -type FirmwareConfig struct { - // Supports the virtualization of platform hardware. - VirtualizationEnabled *bool - - // Allows a single physical processor core to appear as several logical processors. - SimultaneousMultithreadingEnabled *bool - - // SR-IOV support enables a hypervisor to create virtual instances of a PCI-express device, potentially increasing performance. - SriovEnabled *bool -} - -// AccessDetails contains the information about how to get to a BMC. -// -// NOTE(dhellmann): This structure is very likely to change as we -// adapt it to additional types. -type AccessDetails interface { - // Type returns the kind of the BMC, indicating the driver that - // will be used to communicate with it. - Type() string - - // NeedsMAC returns true when the host is going to need a separate - // port created rather than having it discovered. - NeedsMAC() bool - - // The name of the driver to instantiate the BMC with. This may differ - // from the Type - both the ipmi and libvirt types use the ipmi driver. - Driver() string - - // DriverInfo returns a data structure to pass as the DriverInfo - // parameter when creating a node in Ironic. The structure is - // pre-populated with the access information, and the caller is - // expected to add any other information that might be needed - // (such as the kernel and ramdisk locations). - DriverInfo(bmcCreds Credentials) map[string]interface{} - - BIOSInterface() string - - // Boot interface to set - BootInterface() string - - ManagementInterface() string - PowerInterface() string - RAIDInterface() string - VendorInterface() string - - // Whether the driver supports changing secure boot state. - SupportsSecureBoot() bool - - // Whether the driver supports booting a preprovisioning image in ISO format - SupportsISOPreprovisioningImage() bool - - // RequiresProvisioningNetwork checks whether the driver requires a provisioning network - RequiresProvisioningNetwork() bool - - // Build BIOS clean steps for Ironic - BuildBIOSSettings(firmwareConfig *FirmwareConfig) (settings []map[string]string, err error) -} - -func getParsedURL(address string) (parsedURL *url.URL, err error) { - // Start by assuming "type://host:port" - parsedURL, err = url.Parse(address) - if err != nil { - // We failed to parse the URL, but it may just be a host or - // host:port string (which the URL parser rejects because ":" - // is not allowed in the first segment of a - // path). Unfortunately there is no error class to represent - // that specific error, so we have to guess. - if strings.Contains(address, ":") { - // If we can parse host:port, carry on with those - // values. Otherwise, report the original parser error. 
_, _, err2 := net.SplitHostPort(address) - if err2 != nil { - return nil, errors.Wrap(err, "failed to parse BMC address information") - } - } - parsedURL = &url.URL{ - Scheme: "ipmi", - Host: address, - } - } else { - // Successfully parsed the URL - if parsedURL.Opaque != "" { - parsedURL, err = url.Parse(strings.Replace(address, ":", "://", 1)) - if err != nil { - return nil, errors.Wrap(err, "failed to parse BMC address information") - - } - } - if parsedURL.Scheme == "" { - if parsedURL.Hostname() == "" { - // If there was no scheme at all, the hostname was - // interpreted as a path. - parsedURL, err = url.Parse(strings.Join([]string{"ipmi://", address}, "")) - if err != nil { - return nil, errors.Wrap(err, "failed to parse BMC address information") - } - } - } - } - return parsedURL, nil -} - -// NewAccessDetails creates an AccessDetails structure from the URL -// for a BMC. -func NewAccessDetails(address string, disableCertificateVerification bool) (AccessDetails, error) { - - if address == "" { - return nil, errors.New("missing BMC address") - } - - parsedURL, err := getParsedURL(address) - if err != nil { - return nil, err - } - - factory, ok := factories[parsedURL.Scheme] - if !ok { - return nil, &UnknownBMCTypeError{address, parsedURL.Scheme} - } - - return factory(parsedURL, disableCertificateVerification) -} diff --git a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/credentials.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/credentials.go deleted file mode 100644 index cbf28a50f..000000000 --- a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/credentials.go +++ /dev/null @@ -1,18 +0,0 @@ -package bmc - -// Credentials holds the information for authenticating with the BMC. -type Credentials struct { - Username string - Password string -} - -// Validate returns an error if the credentials are invalid. -func (creds Credentials) Validate() error { - if creds.Username == "" { - return &CredentialsValidationError{message: "Missing BMC connection detail 'username' in credentials"} - } - if creds.Password == "" { - return &CredentialsValidationError{message: "Missing BMC connection detail 'password' in credentials"} - } - return nil -} diff --git a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/errors.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/errors.go deleted file mode 100644 index e018fa66b..000000000 --- a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/errors.go +++ /dev/null @@ -1,28 +0,0 @@ -package bmc - -import ( - "fmt" -) - -// UnknownBMCTypeError is returned when the provided BMC address cannot be -// mapped to a driver. -type UnknownBMCTypeError struct { - address string - bmcType string -} - -func (e UnknownBMCTypeError) Error() string { - return fmt.Sprintf("Unknown BMC type '%s' for address %s", - e.bmcType, e.address) -} - -// CredentialsValidationError is returned when the provided BMC credentials -// are invalid (e.g. 
null) -type CredentialsValidationError struct { - message string -} - -func (e CredentialsValidationError) Error() string { - return fmt.Sprintf("Validation error with BMC credentials: %s", - e.message) -} diff --git a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/ibmc.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/ibmc.go deleted file mode 100644 index ba8185b32..000000000 --- a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/ibmc.go +++ /dev/null @@ -1,123 +0,0 @@ -package bmc - -import ( - "fmt" - "net/url" - "strings" -) - -func init() { - RegisterFactory("ibmc", newIbmcAccessDetails, []string{"http", "https"}) -} - -func newIbmcAccessDetails(parsedURL *url.URL, disableCertificateVerification bool) (AccessDetails, error) { - return &ibmcAccessDetails{ - bmcType: parsedURL.Scheme, - host: parsedURL.Host, - path: parsedURL.Path, - disableCertificateVerification: disableCertificateVerification, - }, nil -} - -type ibmcAccessDetails struct { - bmcType string - host string - path string - disableCertificateVerification bool -} - -func (a *ibmcAccessDetails) Type() string { - return a.bmcType -} - -// NeedsMAC returns true when the host is going to need a separate -// port created rather than having it discovered. -func (a *ibmcAccessDetails) NeedsMAC() bool { - // For the inspection to work, we need a MAC address - // https://github.com/metal3-io/baremetal-operator/pull/284#discussion_r317579040 - return true -} - -func (a *ibmcAccessDetails) Driver() string { - return "ibmc" -} - -func (a *ibmcAccessDetails) DisableCertificateVerification() bool { - return a.disableCertificateVerification -} - -const ibmcDefaultScheme = "https" - -// DriverInfo returns a data structure to pass as the DriverInfo -// parameter when creating a node in Ironic. The structure is -// pre-populated with the access information, and the caller is -// expected to add any other information that might be needed (such as -// the kernel and ramdisk locations). 
-func (a *ibmcAccessDetails) DriverInfo(bmcCreds Credentials) map[string]interface{} { - - ibmcAddress := []string{} - schemes := strings.Split(a.bmcType, "+") - if len(schemes) > 1 { - ibmcAddress = append(ibmcAddress, schemes[1]) - } else { - ibmcAddress = append(ibmcAddress, ibmcDefaultScheme) - } - ibmcAddress = append(ibmcAddress, "://") - ibmcAddress = append(ibmcAddress, a.host) - ibmcAddress = append(ibmcAddress, a.path) - - result := map[string]interface{}{ - "ibmc_username": bmcCreds.Username, - "ibmc_password": bmcCreds.Password, - "ibmc_address": strings.Join(ibmcAddress, ""), - } - - if a.disableCertificateVerification { - result["ibmc_verify_ca"] = false - } - - return result -} - -func (a *ibmcAccessDetails) BIOSInterface() string { - return "" -} - -func (a *ibmcAccessDetails) BootInterface() string { - return "pxe" -} - -func (a *ibmcAccessDetails) ManagementInterface() string { - return "ibmc" -} - -func (a *ibmcAccessDetails) PowerInterface() string { - return "ibmc" -} - -func (a *ibmcAccessDetails) RAIDInterface() string { - return "no-raid" -} - -func (a *ibmcAccessDetails) VendorInterface() string { - return "" -} - -func (a *ibmcAccessDetails) SupportsSecureBoot() bool { - return false -} - -func (a *ibmcAccessDetails) SupportsISOPreprovisioningImage() bool { - return false -} - -func (a *ibmcAccessDetails) RequiresProvisioningNetwork() bool { - return true -} - -func (a *ibmcAccessDetails) BuildBIOSSettings(firmwareConfig *FirmwareConfig) (settings []map[string]string, err error) { - if firmwareConfig != nil { - return nil, fmt.Errorf("firmware settings for %s are not supported", a.Driver()) - } - return nil, nil -} diff --git a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/idrac.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/idrac.go deleted file mode 100644 index b7b8cb844..000000000 --- a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/idrac.go +++ /dev/null @@ -1,162 +0,0 @@ -package bmc - -import ( - "net/url" - "strings" -) - -func init() { - RegisterFactory("idrac", newIDRACAccessDetails, []string{"http", "https"}) -} - -func newIDRACAccessDetails(parsedURL *url.URL, disableCertificateVerification bool) (AccessDetails, error) { - return &iDracAccessDetails{ - bmcType: parsedURL.Scheme, - portNum: parsedURL.Port(), - hostname: parsedURL.Hostname(), - path: parsedURL.Path, - disableCertificateVerification: disableCertificateVerification, - }, nil -} - -type iDracAccessDetails struct { - bmcType string - portNum string - hostname string - path string - disableCertificateVerification bool -} - -func (a *iDracAccessDetails) Type() string { - return a.bmcType -} - -// NeedsMAC returns true when the host is going to need a separate -// port created rather than having it discovered. -func (a *iDracAccessDetails) NeedsMAC() bool { - return false -} - -func (a *iDracAccessDetails) Driver() string { - return "idrac" -} - -func (a *iDracAccessDetails) DisableCertificateVerification() bool { - return a.disableCertificateVerification -} - -// DriverInfo returns a data structure to pass as the DriverInfo -// parameter when creating a node in Ironic. The structure is -// pre-populated with the access information, and the caller is -// expected to add any other information that might be needed (such as -// the kernel and ramdisk locations). 
-func (a *iDracAccessDetails) DriverInfo(bmcCreds Credentials) map[string]interface{} { - result := map[string]interface{}{ - "drac_username": bmcCreds.Username, - "drac_password": bmcCreds.Password, - "drac_address": a.hostname, - } - if a.disableCertificateVerification { - result["drac_verify_ca"] = false - } - - schemes := strings.Split(a.bmcType, "+") - if len(schemes) > 1 { - result["drac_protocol"] = schemes[1] - } - if a.portNum != "" { - result["drac_port"] = a.portNum - } - if a.path != "" { - result["drac_path"] = a.path - } - - return result -} - -func (a *iDracAccessDetails) BIOSInterface() string { - return "" -} - -func (a *iDracAccessDetails) BootInterface() string { - return "ipxe" -} - -func (a *iDracAccessDetails) ManagementInterface() string { - return "" -} - -func (a *iDracAccessDetails) PowerInterface() string { - return "" -} - -func (a *iDracAccessDetails) RAIDInterface() string { - return "idrac-wsman" -} - -func (a *iDracAccessDetails) VendorInterface() string { - return "" -} - -// NOTE(dtantsur): change to true if we switch to redfish-based implementations -// by default. -func (a *iDracAccessDetails) SupportsSecureBoot() bool { - return false -} - -func (a *iDracAccessDetails) SupportsISOPreprovisioningImage() bool { - return false -} - -func (a *iDracAccessDetails) RequiresProvisioningNetwork() bool { - return true -} - -func (a *iDracAccessDetails) BuildBIOSSettings(firmwareConfig *FirmwareConfig) (settings []map[string]string, err error) { - if firmwareConfig == nil { - return nil, nil - } - - var value string - - if firmwareConfig.VirtualizationEnabled != nil { - value = "Disabled" - if *firmwareConfig.VirtualizationEnabled { - value = "Enabled" - } - settings = append(settings, - map[string]string{ - "name": "ProcVirtualization", - "value": value, - }, - ) - } - - if firmwareConfig.SimultaneousMultithreadingEnabled != nil { - value = "Disabled" - if *firmwareConfig.SimultaneousMultithreadingEnabled { - value = "Enabled" - } - settings = append(settings, - map[string]string{ - "name": "LogicalProc", - "value": value, - }, - ) - } - - if firmwareConfig.SriovEnabled != nil { - value = "Disabled" - if *firmwareConfig.SriovEnabled { - value = "Enabled" - } - settings = append(settings, - map[string]string{ - "name": "SriovGlobalEnable", - "value": value, - }, - ) - } - - return -} diff --git a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/idrac_virtualmedia.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/idrac_virtualmedia.go deleted file mode 100644 index 7ae916d8e..000000000 --- a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/idrac_virtualmedia.go +++ /dev/null @@ -1,113 +0,0 @@ -package bmc - -import ( - "fmt" - "net/url" -) - -func init() { - schemes := []string{"http", "https"} - RegisterFactory("idrac-virtualmedia", newRedfishiDracVirtualMediaAccessDetails, schemes) -} - -func newRedfishiDracVirtualMediaAccessDetails(parsedURL *url.URL, disableCertificateVerification bool) (AccessDetails, error) { - return &redfishiDracVirtualMediaAccessDetails{ - bmcType: parsedURL.Scheme, - host: parsedURL.Host, - path: parsedURL.Path, - disableCertificateVerification: disableCertificateVerification, - }, nil -} - -type redfishiDracVirtualMediaAccessDetails struct { - bmcType string - host string - path string - disableCertificateVerification bool -} - -func (a *redfishiDracVirtualMediaAccessDetails) Type() string { - return a.bmcType -} - -// NeedsMAC returns true when the host is going to need a 
separate -// port created rather than having it discovered. -func (a *redfishiDracVirtualMediaAccessDetails) NeedsMAC() bool { - // For the inspection to work, we need a MAC address - // https://github.com/metal3-io/baremetal-operator/pull/284#discussion_r317579040 - return true -} - -func (a *redfishiDracVirtualMediaAccessDetails) DisableCertificateVerification() bool { - return a.disableCertificateVerification -} - -// DriverInfo returns a data structure to pass as the DriverInfo -// parameter when creating a node in Ironic. The structure is -// pre-populated with the access information, and the caller is -// expected to add any other information that might be needed (such as -// the kernel and ramdisk locations). -func (a *redfishiDracVirtualMediaAccessDetails) DriverInfo(bmcCreds Credentials) map[string]interface{} { - result := map[string]interface{}{ - "redfish_system_id": a.path, - "redfish_username": bmcCreds.Username, - "redfish_password": bmcCreds.Password, - "redfish_address": getRedfishAddress(a.bmcType, a.host), - } - - if a.disableCertificateVerification { - result["redfish_verify_ca"] = false - } - - return result -} - -// iDrac Virtual Media Overrides - -func (a *redfishiDracVirtualMediaAccessDetails) Driver() string { - return "idrac" -} - -func (a *redfishiDracVirtualMediaAccessDetails) BIOSInterface() string { - return "idrac-redfish" -} - -func (a *redfishiDracVirtualMediaAccessDetails) BootInterface() string { - return "idrac-redfish-virtual-media" -} - -func (a *redfishiDracVirtualMediaAccessDetails) ManagementInterface() string { - return "idrac-redfish" -} - -func (a *redfishiDracVirtualMediaAccessDetails) PowerInterface() string { - return "idrac-redfish" -} - -func (a *redfishiDracVirtualMediaAccessDetails) RAIDInterface() string { - return "no-raid" -} - -func (a *redfishiDracVirtualMediaAccessDetails) VendorInterface() string { - // NOTE(dtantsur): the idrac hardware type defaults to WSMAN vendor, we need to use the Redfish implementation. 
- return "idrac-redfish" -} - -func (a *redfishiDracVirtualMediaAccessDetails) SupportsSecureBoot() bool { - return true -} - -func (a *redfishiDracVirtualMediaAccessDetails) SupportsISOPreprovisioningImage() bool { - return true -} - -func (a *redfishiDracVirtualMediaAccessDetails) RequiresProvisioningNetwork() bool { - return false -} - -func (a *redfishiDracVirtualMediaAccessDetails) BuildBIOSSettings(firmwareConfig *FirmwareConfig) (settings []map[string]string, err error) { - if firmwareConfig != nil { - return nil, fmt.Errorf("firmware settings for %s are not supported", a.Driver()) - } - return nil, nil -} diff --git a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/ilo4.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/ilo4.go deleted file mode 100644 index bff98bc60..000000000 --- a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/ilo4.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright (c) 2016-2018 Hewlett Packard Enterprise Development LP - -package bmc - -import ( - "net/url" -) - -func init() { - RegisterFactory("ilo4", newILOAccessDetails, []string{"https"}) - RegisterFactory("ilo4-virtualmedia", newILOVirtualMediaAccessDetails, []string{"https"}) -} - -func newILOAccessDetails(parsedURL *url.URL, disableCertificateVerification bool) (AccessDetails, error) { - return &iLOAccessDetails{ - bmcType: parsedURL.Scheme, - portNum: parsedURL.Port(), - hostname: parsedURL.Hostname(), - disableCertificateVerification: disableCertificateVerification, - }, nil -} - -func newILOVirtualMediaAccessDetails(parsedURL *url.URL, disableCertificateVerification bool) (AccessDetails, error) { - return &iLOAccessDetails{ - bmcType: parsedURL.Scheme, - portNum: parsedURL.Port(), - hostname: parsedURL.Hostname(), - disableCertificateVerification: disableCertificateVerification, - useVirtualMedia: true, - }, nil -} - -type iLOAccessDetails struct { - bmcType string - portNum string - hostname string - disableCertificateVerification bool - useVirtualMedia bool -} - -func (a *iLOAccessDetails) Type() string { - return a.bmcType -} - -// NeedsMAC returns true when the host is going to need a separate -// port created rather than having it discovered. -func (a *iLOAccessDetails) NeedsMAC() bool { - // For the inspection to work, we need a MAC address - // https://github.com/metal3-io/baremetal-operator/pull/284#discussion_r317579040 - return true -} - -func (a *iLOAccessDetails) Driver() string { - return "ilo" -} - -func (a *iLOAccessDetails) DisableCertificateVerification() bool { - return a.disableCertificateVerification -} - -// DriverInfo returns a data structure to pass as the DriverInfo -// parameter when creating a node in Ironic. The structure is -// pre-populated with the access information, and the caller is -// expected to add any other information that might be needed (such as -// the kernel and ramdisk locations). 
-func (a *iLOAccessDetails) DriverInfo(bmcCreds Credentials) map[string]interface{} { - - result := map[string]interface{}{ - "ilo_username": bmcCreds.Username, - "ilo_password": bmcCreds.Password, - "ilo_address": a.hostname, - } - - if a.disableCertificateVerification { - result["ilo_verify_ca"] = false - } - - if a.portNum != "" { - result["client_port"] = a.portNum - } - - return result -} - -func (a *iLOAccessDetails) BIOSInterface() string { - return "" -} - -func (a *iLOAccessDetails) BootInterface() string { - if a.useVirtualMedia { - return "ilo-virtual-media" - } else { - return "ilo-ipxe" - } -} - -func (a *iLOAccessDetails) ManagementInterface() string { - return "" -} - -func (a *iLOAccessDetails) PowerInterface() string { - return "" -} - -func (a *iLOAccessDetails) RAIDInterface() string { - return "no-raid" -} - -func (a *iLOAccessDetails) VendorInterface() string { - return "" -} - -func (a *iLOAccessDetails) SupportsSecureBoot() bool { - return true -} - -func (a *iLOAccessDetails) SupportsISOPreprovisioningImage() bool { - return a.useVirtualMedia -} - -func (a *iLOAccessDetails) RequiresProvisioningNetwork() bool { - return !a.useVirtualMedia -} - -func (a *iLOAccessDetails) BuildBIOSSettings(firmwareConfig *FirmwareConfig) (settings []map[string]string, err error) { - if firmwareConfig == nil { - return nil, nil - } - - var value string - - if firmwareConfig.VirtualizationEnabled != nil { - value = "Disabled" - if *firmwareConfig.VirtualizationEnabled { - value = "Enabled" - } - settings = append(settings, - map[string]string{ - "name": "ProcVirtualization", - "value": value, - }, - ) - } - - if firmwareConfig.SimultaneousMultithreadingEnabled != nil { - value = "Disabled" - if *firmwareConfig.SimultaneousMultithreadingEnabled { - value = "Enabled" - } - settings = append(settings, - map[string]string{ - "name": "ProcHyperthreading", - "value": value, - }, - ) - } - - if firmwareConfig.SriovEnabled != nil { - value = "Disabled" - if *firmwareConfig.SriovEnabled { - value = "Enabled" - } - settings = append(settings, - map[string]string{ - "name": "Sriov", - "value": value, - }, - ) - } - - return -} diff --git a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/ilo5.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/ilo5.go deleted file mode 100644 index 2cfbee1fb..000000000 --- a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/ilo5.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright (c) 2016-2018 Hewlett Packard Enterprise Development LP - -package bmc - -import ( - "net/url" -) - -func init() { - RegisterFactory("ilo5", newILO5AccessDetails, []string{"https"}) -} - -func newILO5AccessDetails(parsedURL *url.URL, disableCertificateVerification bool) (AccessDetails, error) { - return &iLO5AccessDetails{ - bmcType: parsedURL.Scheme, - portNum: parsedURL.Port(), - hostname: parsedURL.Hostname(), - disableCertificateVerification: disableCertificateVerification, - }, nil -} - -type iLO5AccessDetails struct { - bmcType string - portNum string - hostname string - disableCertificateVerification bool -} - -func (a *iLO5AccessDetails) Type() string { - return a.bmcType -} - -// NeedsMAC returns true when the host is going to need a separate -// port created rather than having it discovered. 
-func (a *iLO5AccessDetails) NeedsMAC() bool { - // For the inspection to work, we need a MAC address - // https://github.com/metal3-io/baremetal-operator/pull/284#discussion_r317579040 - return true -} - -func (a *iLO5AccessDetails) Driver() string { - return "ilo5" -} - -func (a *iLO5AccessDetails) DisableCertificateVerification() bool { - return a.disableCertificateVerification -} - -// DriverInfo returns a data structure to pass as the DriverInfo -// parameter when creating a node in Ironic. The structure is -// pre-populated with the access information, and the caller is -// expected to add any other information that might be needed (such as -// the kernel and ramdisk locations). -func (a *iLO5AccessDetails) DriverInfo(bmcCreds Credentials) map[string]interface{} { - - result := map[string]interface{}{ - "ilo_username": bmcCreds.Username, - "ilo_password": bmcCreds.Password, - "ilo_address": a.hostname, - } - - if a.disableCertificateVerification { - result["ilo_verify_ca"] = false - } - - if a.portNum != "" { - result["client_port"] = a.portNum - } - - return result -} - -func (a *iLO5AccessDetails) BIOSInterface() string { - return "" -} - -func (a *iLO5AccessDetails) BootInterface() string { - return "ilo-ipxe" -} - -func (a *iLO5AccessDetails) ManagementInterface() string { - return "" -} - -func (a *iLO5AccessDetails) PowerInterface() string { - return "" -} - -func (a *iLO5AccessDetails) RAIDInterface() string { - return "ilo5" -} - -func (a *iLO5AccessDetails) VendorInterface() string { - return "" -} - -func (a *iLO5AccessDetails) SupportsSecureBoot() bool { - return true -} - -func (a *iLO5AccessDetails) SupportsISOPreprovisioningImage() bool { - return false -} - -func (a *iLO5AccessDetails) RequiresProvisioningNetwork() bool { - return true -} - -func (a *iLO5AccessDetails) BuildBIOSSettings(firmwareConfig *FirmwareConfig) (settings []map[string]string, err error) { - if firmwareConfig == nil { - return nil, nil - } - - var value string - - if firmwareConfig.VirtualizationEnabled != nil { - value = "Disabled" - if *firmwareConfig.VirtualizationEnabled { - value = "Enabled" - } - settings = append(settings, - map[string]string{ - "name": "ProcVirtualization", - "value": value, - }, - ) - } - - if firmwareConfig.SimultaneousMultithreadingEnabled != nil { - value = "Disabled" - if *firmwareConfig.SimultaneousMultithreadingEnabled { - value = "Enabled" - } - settings = append(settings, - map[string]string{ - "name": "ProcHyperthreading", - "value": value, - }, - ) - } - - if firmwareConfig.SriovEnabled != nil { - value = "Disabled" - if *firmwareConfig.SriovEnabled { - value = "Enabled" - } - settings = append(settings, - map[string]string{ - "name": "Sriov", - "value": value, - }, - ) - } - - return -} diff --git a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/ipmi.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/ipmi.go deleted file mode 100644 index c4a29d00c..000000000 --- a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/ipmi.go +++ /dev/null @@ -1,133 +0,0 @@ -package bmc - -import ( - "fmt" - "net/url" -) - -func init() { - RegisterFactory("ipmi", newIPMIAccessDetails, []string{}) - RegisterFactory("libvirt", newIPMIAccessDetails, []string{}) -} - -func getPrivilegeLevel(rawquery string) string { - privilegelevel := "ADMINISTRATOR" - q, err := url.ParseQuery(rawquery) - if err != nil { - return privilegelevel - } - if val, ok := q["privilegelevel"]; ok { - return val[0] - } - return privilegelevel -} - 
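Note: getPrivilegeLevel above is deliberately forgiving — a malformed or absent privilegelevel query parameter on the BMC URL silently falls back to ADMINISTRATOR rather than failing host registration. A small runnable illustration of the same defaulting (the helper name is ours, not the package's):

package main

import (
	"fmt"
	"net/url"
)

// privilegeLevel reproduces the defaulting behavior of getPrivilegeLevel:
// any parse error or missing key falls back to ADMINISTRATOR, so a bad
// query string never blocks registration.
func privilegeLevel(rawQuery string) string {
	const def = "ADMINISTRATOR"
	q, err := url.ParseQuery(rawQuery)
	if err != nil {
		return def
	}
	if v, ok := q["privilegelevel"]; ok {
		return v[0]
	}
	return def
}

func main() {
	u, _ := url.Parse("ipmi://10.0.0.5:623?privilegelevel=OPERATOR")
	fmt.Println(privilegeLevel(u.RawQuery)) // OPERATOR
	fmt.Println(privilegeLevel(""))         // ADMINISTRATOR
}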
-func newIPMIAccessDetails(parsedURL *url.URL, disableCertificateVerification bool) (AccessDetails, error) { - return &ipmiAccessDetails{ - bmcType: parsedURL.Scheme, - portNum: parsedURL.Port(), - hostname: parsedURL.Hostname(), - privilegelevel: getPrivilegeLevel(parsedURL.RawQuery), - disableCertificateVerification: disableCertificateVerification, - }, nil -} - -type ipmiAccessDetails struct { - bmcType string - portNum string - hostname string - privilegelevel string - disableCertificateVerification bool -} - -const ipmiDefaultPort = "623" - -func (a *ipmiAccessDetails) Type() string { - return a.bmcType -} - -// NeedsMAC returns true when the host is going to need a separate -// port created rather than having it discovered. -func (a *ipmiAccessDetails) NeedsMAC() bool { - // libvirt-based hosts used for dev and testing require a MAC - // address, specified as part of the host, but we don't want the - // provisioner to have to know the rules about which drivers - // require what so we hide that detail inside this class and just - // let the provisioner know that "some" drivers require a MAC and - // it should ask. - return a.bmcType == "libvirt" -} - -func (a *ipmiAccessDetails) Driver() string { - return "ipmi" -} - -func (a *ipmiAccessDetails) DisableCertificateVerification() bool { - return a.disableCertificateVerification -} - -// DriverInfo returns a data structure to pass as the DriverInfo -// parameter when creating a node in Ironic. The structure is -// pre-populated with the access information, and the caller is -// expected to add any other information that might be needed (such as -// the kernel and ramdisk locations). -func (a *ipmiAccessDetails) DriverInfo(bmcCreds Credentials) map[string]interface{} { - result := map[string]interface{}{ - "ipmi_port": a.portNum, - "ipmi_username": bmcCreds.Username, - "ipmi_password": bmcCreds.Password, - "ipmi_address": a.hostname, - "ipmi_priv_level": a.privilegelevel, - } - - if a.disableCertificateVerification { - result["ipmi_verify_ca"] = false - } - if a.portNum == "" { - result["ipmi_port"] = ipmiDefaultPort - } - return result -} - -func (a *ipmiAccessDetails) BIOSInterface() string { - return "" -} - -func (a *ipmiAccessDetails) BootInterface() string { - return "ipxe" -} - -func (a *ipmiAccessDetails) ManagementInterface() string { - return "" -} - -func (a *ipmiAccessDetails) PowerInterface() string { - return "" -} - -func (a *ipmiAccessDetails) RAIDInterface() string { - return "no-raid" -} - -func (a *ipmiAccessDetails) VendorInterface() string { - return "" -} - -func (a *ipmiAccessDetails) SupportsSecureBoot() bool { - return false -} - -func (a *ipmiAccessDetails) SupportsISOPreprovisioningImage() bool { - return false -} - -func (a *ipmiAccessDetails) RequiresProvisioningNetwork() bool { - return true -} - -func (a *ipmiAccessDetails) BuildBIOSSettings(firmwareConfig *FirmwareConfig) (settings []map[string]string, err error) { - if firmwareConfig != nil { - return nil, fmt.Errorf("firmware settings for %s are not supported", a.Driver()) - } - return nil, nil -} diff --git a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/irmc.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/irmc.go deleted file mode 100644 index c6b12f838..000000000 --- a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/irmc.go +++ /dev/null @@ -1,154 +0,0 @@ -package bmc - -import ( - "net/url" -) - -func init() { - RegisterFactory("irmc", newIRMCAccessDetails, []string{}) -} - -func 
newIRMCAccessDetails(parsedURL *url.URL, disableCertificateVerification bool) (AccessDetails, error) { - return &iRMCAccessDetails{ - bmcType: parsedURL.Scheme, - portNum: parsedURL.Port(), - hostname: parsedURL.Hostname(), - disableCertificateVerification: disableCertificateVerification, - }, nil -} - -type iRMCAccessDetails struct { - bmcType string - portNum string - hostname string - disableCertificateVerification bool -} - -func (a *iRMCAccessDetails) Type() string { - return a.bmcType -} - -// NeedsMAC returns true when the host is going to need a separate -// port created rather than having it discovered. -func (a *iRMCAccessDetails) NeedsMAC() bool { - return false -} - -func (a *iRMCAccessDetails) Driver() string { - return "irmc" -} - -func (a *iRMCAccessDetails) DisableCertificateVerification() bool { - return a.disableCertificateVerification -} - -// DriverInfo returns a data structure to pass as the DriverInfo -// parameter when creating a node in Ironic. The structure is -// pre-populated with the access information, and the caller is -// expected to add any other information that might be needed (such as -// the kernel and ramdisk locations). -func (a *iRMCAccessDetails) DriverInfo(bmcCreds Credentials) map[string]interface{} { - result := map[string]interface{}{ - "irmc_username": bmcCreds.Username, - "irmc_password": bmcCreds.Password, - "irmc_address": a.hostname, - "ipmi_username": bmcCreds.Username, - "ipmi_password": bmcCreds.Password, - "ipmi_address": a.hostname, - } - - if a.disableCertificateVerification { - result["irmc_verify_ca"] = false - } - - if a.portNum != "" { - result["irmc_port"] = a.portNum - } - - return result -} - -func (a *iRMCAccessDetails) BIOSInterface() string { - return "" -} - -func (a *iRMCAccessDetails) BootInterface() string { - return "pxe" -} - -func (a *iRMCAccessDetails) ManagementInterface() string { - return "" -} - -func (a *iRMCAccessDetails) PowerInterface() string { - return "ipmitool" -} - -func (a *iRMCAccessDetails) RAIDInterface() string { - return "irmc" -} - -func (a *iRMCAccessDetails) VendorInterface() string { - return "" -} - -func (a *iRMCAccessDetails) SupportsSecureBoot() bool { - return true -} - -func (a *iRMCAccessDetails) SupportsISOPreprovisioningImage() bool { - return false -} - -func (a *iRMCAccessDetails) RequiresProvisioningNetwork() bool { - return true -} - -func (a *iRMCAccessDetails) BuildBIOSSettings(firmwareConfig *FirmwareConfig) (settings []map[string]string, err error) { - if firmwareConfig == nil { - return nil, nil - } - - var value string - - if firmwareConfig.VirtualizationEnabled != nil { - value = "False" - if *firmwareConfig.VirtualizationEnabled { - value = "True" - } - settings = append(settings, - map[string]string{ - "name": "cpu_vt_enabled", - "value": value, - }, - ) - } - - if firmwareConfig.SimultaneousMultithreadingEnabled != nil { - value = "False" - if *firmwareConfig.SimultaneousMultithreadingEnabled { - value = "True" - } - settings = append(settings, - map[string]string{ - "name": "hyper_threading_enabled", - "value": value, - }, - ) - } - - if firmwareConfig.SriovEnabled != nil { - value = "False" - if *firmwareConfig.SriovEnabled { - value = "True" - } - settings = append(settings, - map[string]string{ - "name": "single_root_io_virtualization_support_enabled", - "value": value, - }, - ) - } - - return -} diff --git a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/redfish.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/redfish.go 
deleted file mode 100644 index c2d78c33f..000000000 --- a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/redfish.go +++ /dev/null @@ -1,180 +0,0 @@ -package bmc - -import ( - "fmt" - "net/url" - "strings" -) - -func init() { - schemes := []string{"http", "https"} - RegisterFactory("redfish", newRedfishAccessDetails, schemes) - RegisterFactory("ilo5-redfish", newRedfishAccessDetails, schemes) - RegisterFactory("idrac-redfish", newRedfishiDracAccessDetails, schemes) -} - -func redfishDetails(parsedURL *url.URL, disableCertificateVerification bool) *redfishAccessDetails { - return &redfishAccessDetails{ - bmcType: parsedURL.Scheme, - host: parsedURL.Host, - path: parsedURL.Path, - disableCertificateVerification: disableCertificateVerification, - } -} - -func newRedfishAccessDetails(parsedURL *url.URL, disableCertificateVerification bool) (AccessDetails, error) { - return redfishDetails(parsedURL, disableCertificateVerification), nil -} - -func newRedfishiDracAccessDetails(parsedURL *url.URL, disableCertificateVerification bool) (AccessDetails, error) { - return &redfishiDracAccessDetails{ - *redfishDetails(parsedURL, disableCertificateVerification), - }, nil -} - -type redfishAccessDetails struct { - bmcType string - host string - path string - disableCertificateVerification bool -} - -type redfishiDracAccessDetails struct { - redfishAccessDetails -} - -const redfishDefaultScheme = "https" - -func (a *redfishAccessDetails) Type() string { - return a.bmcType -} - -// NeedsMAC returns true when the host is going to need a separate -// port created rather than having it discovered. -func (a *redfishAccessDetails) NeedsMAC() bool { - // For the inspection to work, we need a MAC address - // https://github.com/metal3-io/baremetal-operator/pull/284#discussion_r317579040 - return true -} - -func (a *redfishAccessDetails) Driver() string { - return "redfish" -} - -func (a *redfishAccessDetails) DisableCertificateVerification() bool { - return a.disableCertificateVerification -} - -func getRedfishAddress(bmcType, host string) string { - redfishAddress := []string{} - schemes := strings.Split(bmcType, "+") - if len(schemes) > 1 { - redfishAddress = append(redfishAddress, schemes[1]) - } else { - redfishAddress = append(redfishAddress, redfishDefaultScheme) - } - redfishAddress = append(redfishAddress, "://") - redfishAddress = append(redfishAddress, host) - return strings.Join(redfishAddress, "") -} - -// DriverInfo returns a data structure to pass as the DriverInfo -// parameter when creating a node in Ironic. The structure is -// pre-populated with the access information, and the caller is -// expected to add any other information that might be needed (such as -// the kernel and ramdisk locations). 
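Note: getRedfishAddress above encodes the "redfish+http"-style convention — anything after the "+" in the BMC type selects the transport scheme, with https as the default. A condensed equivalent with example outputs (an illustrative rewrite, not the vendored function):

package main

import (
	"fmt"
	"strings"
)

// redfishAddress mirrors getRedfishAddress: a BMC type such as
// "redfish+http" carries the transport after the "+"; with no suffix
// the address defaults to https.
func redfishAddress(bmcType, host string) string {
	scheme := "https"
	if parts := strings.SplitN(bmcType, "+", 2); len(parts) == 2 {
		scheme = parts[1]
	}
	return scheme + "://" + host
}

func main() {
	fmt.Println(redfishAddress("redfish", "bmc.example.com"))      // https://bmc.example.com
	fmt.Println(redfishAddress("redfish+http", "bmc.example.com")) // http://bmc.example.com
}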
-func (a *redfishAccessDetails) DriverInfo(bmcCreds Credentials) map[string]interface{} { - result := map[string]interface{}{ - "redfish_system_id": a.path, - "redfish_username": bmcCreds.Username, - "redfish_password": bmcCreds.Password, - "redfish_address": getRedfishAddress(a.bmcType, a.host), - } - - if a.disableCertificateVerification { - result["redfish_verify_ca"] = false - } - - return result -} - -func (a *redfishAccessDetails) BIOSInterface() string { - return "" -} - -// That can be either pxe or redfish-virtual-media -func (a *redfishAccessDetails) BootInterface() string { - return "ipxe" -} - -func (a *redfishAccessDetails) ManagementInterface() string { - return "" -} - -func (a *redfishAccessDetails) PowerInterface() string { - return "" -} - -func (a *redfishAccessDetails) RAIDInterface() string { - return "no-raid" -} - -func (a *redfishAccessDetails) VendorInterface() string { - return "" -} - -func (a *redfishAccessDetails) SupportsSecureBoot() bool { - return true -} - -func (a *redfishAccessDetails) SupportsISOPreprovisioningImage() bool { - return false -} - -func (a *redfishAccessDetails) RequiresProvisioningNetwork() bool { - return true -} - -func (a *redfishAccessDetails) BuildBIOSSettings(firmwareConfig *FirmwareConfig) (settings []map[string]string, err error) { - if firmwareConfig != nil { - return nil, fmt.Errorf("firmware settings for %s are not supported", a.Driver()) - } - return nil, nil -} - -// iDrac Redfish Overrides -func (a *redfishiDracAccessDetails) Driver() string { - return "idrac" -} - -func (a *redfishiDracAccessDetails) BIOSInterface() string { - return "idrac-redfish" -} - -func (a *redfishiDracAccessDetails) BootInterface() string { - return "ipxe" -} - -func (a *redfishiDracAccessDetails) ManagementInterface() string { - return "idrac-redfish" -} - -func (a *redfishiDracAccessDetails) PowerInterface() string { - return "idrac-redfish" -} - -func (a *redfishiDracAccessDetails) RAIDInterface() string { - return "no-raid" -} - -func (a *redfishiDracAccessDetails) VendorInterface() string { - // NOTE(dtantsur): the idrac hardware type defaults to WSMAN vendor, we need to use the Redfish implementation. 
- return "idrac-redfish" -} - -func (a *redfishiDracAccessDetails) BuildBIOSSettings(firmwareConfig *FirmwareConfig) (settings []map[string]string, err error) { - if firmwareConfig != nil { - return nil, fmt.Errorf("firmware settings for %s are not supported", a.Driver()) - } - return nil, nil -} diff --git a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/redfish_virtualmedia.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/redfish_virtualmedia.go deleted file mode 100644 index bd319d177..000000000 --- a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc/redfish_virtualmedia.go +++ /dev/null @@ -1,111 +0,0 @@ -package bmc - -import ( - "fmt" - "net/url" -) - -func init() { - schemes := []string{"http", "https"} - RegisterFactory("redfish-virtualmedia", newRedfishVirtualMediaAccessDetails, schemes) - RegisterFactory("ilo5-virtualmedia", newRedfishVirtualMediaAccessDetails, schemes) -} - -func newRedfishVirtualMediaAccessDetails(parsedURL *url.URL, disableCertificateVerification bool) (AccessDetails, error) { - return &redfishVirtualMediaAccessDetails{ - bmcType: parsedURL.Scheme, - host: parsedURL.Host, - path: parsedURL.Path, - disableCertificateVerification: disableCertificateVerification, - }, nil -} - -type redfishVirtualMediaAccessDetails struct { - bmcType string - host string - path string - disableCertificateVerification bool -} - -func (a *redfishVirtualMediaAccessDetails) Type() string { - return a.bmcType -} - -// NeedsMAC returns true when the host is going to need a separate -// port created rather than having it discovered. -func (a *redfishVirtualMediaAccessDetails) NeedsMAC() bool { - // For the inspection to work, we need a MAC address - // https://github.com/metal3-io/baremetal-operator/pull/284#discussion_r317579040 - return true -} - -func (a *redfishVirtualMediaAccessDetails) Driver() string { - return "redfish" -} - -func (a *redfishVirtualMediaAccessDetails) DisableCertificateVerification() bool { - return a.disableCertificateVerification -} - -// DriverInfo returns a data structure to pass as the DriverInfo -// parameter when creating a node in Ironic. The structure is -// pre-populated with the access information, and the caller is -// expected to add any other information that might be needed (such as -// the kernel and ramdisk locations). 
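Note: the iDRAC overrides just closed rely on Go struct embedding — redfishiDracAccessDetails embeds redfishAccessDetails, inherits every promoted method, and shadows only the interface selectors that differ (BIOS, management, power, vendor). A self-contained sketch of that shadowing pattern, with illustrative names:

package main

import "fmt"

type driverInterfaces interface {
	Power() string
	Management() string
}

// redfishBase provides the generic Redfish interface selection.
type redfishBase struct{}

func (redfishBase) Power() string      { return "redfish" }
func (redfishBase) Management() string { return "redfish" }

// idrac embeds the base and overrides only what differs — the same
// method-shadowing pattern the redfishiDracAccessDetails overrides use.
type idrac struct{ redfishBase }

func (idrac) Power() string { return "idrac-redfish" }

func main() {
	var d driverInterfaces = idrac{}
	fmt.Println(d.Power())      // idrac-redfish (shadowed)
	fmt.Println(d.Management()) // redfish (promoted from the embedded base)
}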
-func (a *redfishVirtualMediaAccessDetails) DriverInfo(bmcCreds Credentials) map[string]interface{} { - result := map[string]interface{}{ - "redfish_system_id": a.path, - "redfish_username": bmcCreds.Username, - "redfish_password": bmcCreds.Password, - "redfish_address": getRedfishAddress(a.bmcType, a.host), - } - - if a.disableCertificateVerification { - result["redfish_verify_ca"] = false - } - - return result -} - -func (a *redfishVirtualMediaAccessDetails) BIOSInterface() string { - return "" -} - -func (a *redfishVirtualMediaAccessDetails) BootInterface() string { - return "redfish-virtual-media" -} - -func (a *redfishVirtualMediaAccessDetails) ManagementInterface() string { - return "" -} - -func (a *redfishVirtualMediaAccessDetails) PowerInterface() string { - return "" -} - -func (a *redfishVirtualMediaAccessDetails) RAIDInterface() string { - return "no-raid" -} - -func (a *redfishVirtualMediaAccessDetails) VendorInterface() string { - return "" -} - -func (a *redfishVirtualMediaAccessDetails) SupportsSecureBoot() bool { - return true -} - -func (a *redfishVirtualMediaAccessDetails) SupportsISOPreprovisioningImage() bool { - return true -} - -func (a *redfishVirtualMediaAccessDetails) RequiresProvisioningNetwork() bool { - return false -} - -func (a *redfishVirtualMediaAccessDetails) BuildBIOSSettings(firmwareConfig *FirmwareConfig) (settings []map[string]string, err error) { - if firmwareConfig != nil { - return nil, fmt.Errorf("firmware settings for %s are not supported", a.Driver()) - } - return nil, nil -} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_10_config-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_10_config-operator_01_config.crd.yaml deleted file mode 100644 index 14efbb00e..000000000 --- a/vendor/github.com/openshift/api/operator/v1/0000_10_config-operator_01_config.crd.yaml +++ /dev/null @@ -1,136 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/612 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: configs.operator.openshift.io -spec: - group: operator.openshift.io - names: - categories: - - coreoperators - kind: Config - plural: configs - singular: config - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Config provides information to configure the config operator. It handles installation, migration or synchronization of cloud based cluster configurations like AWS or Azure. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec is the specification of the desired behavior of the Config Operator. - type: object - properties: - logLevel: - description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - type: string - default: Normal - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - managementState: - description: managementState indicates whether and how the operator should manage the component - type: string - pattern: ^(Managed|Unmanaged|Force|Removed)$ - observedConfig: - description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - operatorLogLevel: - description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - type: string - default: Normal - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - status: - description: status defines the observed status of the Config Operator. - type: object - properties: - conditions: - description: conditions is a list of conditions and their status - type: array - items: - description: OperatorCondition is just the standard condition fields. - type: object - properties: - lastTransitionTime: - type: string - format: date-time - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - generations: - description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. - type: array - items: - description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
- type: object - properties: - group: - description: group is the group of the thing you're tracking - type: string - hash: - description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps - type: string - lastGeneration: - description: lastGeneration is the last generation of the workload controller involved - type: integer - format: int64 - name: - description: name is the name of the thing you're tracking - type: string - namespace: - description: namespace is where the thing you're tracking is - type: string - resource: - description: resource is the resource type of the thing you're tracking - type: string - observedGeneration: - description: observedGeneration is the last generation change you've dealt with - type: integer - format: int64 - readyReplicas: - description: readyReplicas indicates how many replicas are ready and at the desired state - type: integer - format: int32 - version: - description: version is the level this availability applies to - type: string - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_12_etcd-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_12_etcd-operator_01_config.crd.yaml deleted file mode 100644 index ff4dc1c8a..000000000 --- a/vendor/github.com/openshift/api/operator/v1/0000_12_etcd-operator_01_config.crd.yaml +++ /dev/null @@ -1,192 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/752 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: etcds.operator.openshift.io -spec: - group: operator.openshift.io - names: - categories: - - coreoperators - kind: Etcd - plural: etcds - singular: etcd - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Etcd provides information to configure an operator to manage etcd. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - type: object - properties: - failedRevisionLimit: - description: failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default) - type: integer - format: int32 - forceRedeploymentReason: - description: forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. 
This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config. - type: string - logLevel: - description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - type: string - default: Normal - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - managementState: - description: managementState indicates whether and how the operator should manage the component - type: string - pattern: ^(Managed|Unmanaged|Force|Removed)$ - observedConfig: - description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - operatorLogLevel: - description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - type: string - default: Normal - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - succeededRevisionLimit: - description: succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default) - type: integer - format: int32 - unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - status: - type: object - properties: - conditions: - description: conditions is a list of conditions and their status - type: array - items: - description: OperatorCondition is just the standard condition fields. - type: object - properties: - lastTransitionTime: - type: string - format: date-time - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - generations: - description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. - type: array - items: - description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
- type: object - properties: - group: - description: group is the group of the thing you're tracking - type: string - hash: - description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps - type: string - lastGeneration: - description: lastGeneration is the last generation of the workload controller involved - type: integer - format: int64 - name: - description: name is the name of the thing you're tracking - type: string - namespace: - description: namespace is where the thing you're tracking is - type: string - resource: - description: resource is the resource type of the thing you're tracking - type: string - latestAvailableRevision: - description: latestAvailableRevision is the deploymentID of the most recent deployment - type: integer - format: int32 - latestAvailableRevisionReason: - description: latestAvailableRevisionReason describe the detailed reason for the most recent deployment - type: string - nodeStatuses: - description: nodeStatuses track the deployment values and errors across individual nodes - type: array - items: - description: NodeStatus provides information about the current state of a particular node managed by this operator. - type: object - properties: - currentRevision: - description: currentRevision is the generation of the most recently successful deployment - type: integer - format: int32 - lastFailedCount: - description: lastFailedCount is how often the installer pod of the last failed revision failed. - type: integer - lastFailedReason: - description: lastFailedReason is a machine readable failure reason string. - type: string - lastFailedRevision: - description: lastFailedRevision is the generation of the deployment we tried and failed to deploy. - type: integer - format: int32 - lastFailedRevisionErrors: - description: lastFailedRevisionErrors is a list of human readable errors during the failed deployment referenced in lastFailedRevision. - type: array - items: - type: string - lastFailedTime: - description: lastFailedTime is the time the last failed revision failed the last time. - type: string - format: date-time - lastFallbackCount: - description: lastFallbackCount is how often a fallback to a previous revision happened. 
- type: integer - nodeName: - description: nodeName is the name of the node - type: string - targetRevision: - description: targetRevision is the generation of the deployment we're trying to apply - type: integer - format: int32 - observedGeneration: - description: observedGeneration is the last generation change you've dealt with - type: integer - format: int64 - readyReplicas: - description: readyReplicas indicates how many replicas are ready and at the desired state - type: integer - format: int32 - version: - description: version is the level this availability applies to - type: string - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml deleted file mode 100644 index 33bba0b7c..000000000 --- a/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml +++ /dev/null @@ -1,242 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/475 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: kubeapiservers.operator.openshift.io -spec: - group: operator.openshift.io - names: - kind: KubeAPIServer - plural: kubeapiservers - singular: kubeapiserver - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "KubeAPIServer provides information to configure an operator - to manage kube-apiserver. \n Compatibility level 1: Stable within a major - release for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec is the specification of the desired behavior of the - Kubernetes API Server - properties: - failedRevisionLimit: - description: failedRevisionLimit is the number of failed static pod - installer revisions to keep on disk and in the api -1 = unlimited, - 0 or unset = 5 (default) - format: int32 - type: integer - forceRedeploymentReason: - description: forceRedeploymentReason can be used to force the redeployment - of the operand by providing a unique string. This provides a mechanism - to kick a previously failed deployment and provide a reason why - you think it will work this time instead of failing again on the - same config. - type: string - logLevel: - default: Normal - description: "logLevel is an intent based logging for an overall component. - \ It does not give fine grained control, but it is a simple way - to manage coarse grained logging choices that operators have to - interpret for their operands. 
\n Valid values are: \"Normal\", \"Debug\", - \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - type: string - managementState: - description: managementState indicates whether and how the operator - should manage the component - pattern: ^(Managed|Force)$ - type: string - observedConfig: - description: observedConfig holds a sparse config that controller - has observed from the cluster state. It exists in spec because - it is an input to the level for the operator - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - operatorLogLevel: - default: Normal - description: "operatorLogLevel is an intent based logging for the - operator itself. It does not give fine grained control, but it - is a simple way to manage coarse grained logging choices that operators - have to interpret for themselves. \n Valid values are: \"Normal\", - \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - type: string - succeededRevisionLimit: - description: succeededRevisionLimit is the number of successful static - pod installer revisions to keep on disk and in the api -1 = unlimited, - 0 or unset = 5 (default) - format: int32 - type: integer - unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that - will override any previously set options. It only needs to be the - fields to override it will end up overlaying in the following order: - 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - status: - description: status is the most recently observed status of the Kubernetes - API Server - properties: - conditions: - description: conditions is a list of conditions and their status - items: - description: OperatorCondition is just the standard condition fields. - properties: - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - type: object - type: array - generations: - description: generations are used to determine when an item needs - to be reconciled or has changed in a way that needs a reaction. - items: - description: GenerationStatus keeps track of the generation for - a given resource so that decisions about forced updates can be - made. 
- properties: - group: - description: group is the group of the thing you're tracking - type: string - hash: - description: hash is an optional field set for resources without - generation that are content sensitive like secrets and configmaps - type: string - lastGeneration: - description: lastGeneration is the last generation of the workload - controller involved - format: int64 - type: integer - name: - description: name is the name of the thing you're tracking - type: string - namespace: - description: namespace is where the thing you're tracking is - type: string - resource: - description: resource is the resource type of the thing you're - tracking - type: string - type: object - type: array - latestAvailableRevision: - description: latestAvailableRevision is the deploymentID of the most - recent deployment - format: int32 - type: integer - latestAvailableRevisionReason: - description: latestAvailableRevisionReason describe the detailed reason - for the most recent deployment - type: string - nodeStatuses: - description: nodeStatuses track the deployment values and errors across - individual nodes - items: - description: NodeStatus provides information about the current state - of a particular node managed by this operator. - properties: - currentRevision: - description: currentRevision is the generation of the most recently - successful deployment - format: int32 - type: integer - lastFailedCount: - description: lastFailedCount is how often the installer pod - of the last failed revision failed. - type: integer - lastFailedReason: - description: lastFailedReason is a machine readable failure - reason string. - type: string - lastFailedRevision: - description: lastFailedRevision is the generation of the deployment - we tried and failed to deploy. - format: int32 - type: integer - lastFailedRevisionErrors: - description: lastFailedRevisionErrors is a list of human readable - errors during the failed deployment referenced in lastFailedRevision. - items: - type: string - type: array - lastFailedTime: - description: lastFailedTime is the time the last failed revision - failed the last time. - format: date-time - type: string - lastFallbackCount: - description: lastFallbackCount is how often a fallback to a - previous revision happened. 
- type: integer - nodeName: - description: nodeName is the name of the node - type: string - targetRevision: - description: targetRevision is the generation of the deployment - we're trying to apply - format: int32 - type: integer - type: object - type: array - observedGeneration: - description: observedGeneration is the last generation change you've - dealt with - format: int64 - type: integer - readyReplicas: - description: readyReplicas indicates how many replicas are ready and - at the desired state - format: int32 - type: integer - version: - description: version is the level this availability applies to - type: string - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml-patch b/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml-patch deleted file mode 100644 index 8145f00c4..000000000 --- a/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml-patch +++ /dev/null @@ -1,3 +0,0 @@ -- op: replace - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/managementState/pattern - value: "^(Managed|Force)$" diff --git a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml deleted file mode 100644 index 21361c6fb..000000000 --- a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml +++ /dev/null @@ -1,254 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/475 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: kubecontrollermanagers.operator.openshift.io -spec: - group: operator.openshift.io - names: - categories: - - coreoperators - kind: KubeControllerManager - plural: kubecontrollermanagers - singular: kubecontrollermanager - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "KubeControllerManager provides information to configure an operator - to manage kube-controller-manager. \n Compatibility level 1: Stable within - a major release for a minimum of 12 months or 3 minor releases (whichever - is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec is the specification of the desired behavior of the - Kubernetes Controller Manager - properties: - failedRevisionLimit: - description: failedRevisionLimit is the number of failed static pod - installer revisions to keep on disk and in the api -1 = unlimited, - 0 or unset = 5 (default) - format: int32 - type: integer - forceRedeploymentReason: - description: forceRedeploymentReason can be used to force the redeployment - of the operand by providing a unique string. This provides a mechanism - to kick a previously failed deployment and provide a reason why - you think it will work this time instead of failing again on the - same config. - type: string - logLevel: - default: Normal - description: "logLevel is an intent based logging for an overall component. - \ It does not give fine grained control, but it is a simple way - to manage coarse grained logging choices that operators have to - interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", - \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - type: string - managementState: - description: managementState indicates whether and how the operator - should manage the component - pattern: ^(Managed|Force)$ - type: string - observedConfig: - description: observedConfig holds a sparse config that controller - has observed from the cluster state. It exists in spec because - it is an input to the level for the operator - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - operatorLogLevel: - default: Normal - description: "operatorLogLevel is an intent based logging for the - operator itself. It does not give fine grained control, but it - is a simple way to manage coarse grained logging choices that operators - have to interpret for themselves. \n Valid values are: \"Normal\", - \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - type: string - succeededRevisionLimit: - description: succeededRevisionLimit is the number of successful static - pod installer revisions to keep on disk and in the api -1 = unlimited, - 0 or unset = 5 (default) - format: int32 - type: integer - unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that - will override any previously set options. It only needs to be the - fields to override it will end up overlaying in the following order: - 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - useMoreSecureServiceCA: - default: false - description: useMoreSecureServiceCA indicates that the service-ca.crt - provided in SA token volumes should include only enough certificates - to validate service serving certificates. Once set to true, it cannot - be set to false. Even if someone finds a way to set it back to false, - the service-ca.crt files that previously existed will only have - the more secure content. - type: boolean - type: object - status: - description: status is the most recently observed status of the Kubernetes - Controller Manager - properties: - conditions: - description: conditions is a list of conditions and their status - items: - description: OperatorCondition is just the standard condition fields. 
- properties: - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - type: object - type: array - generations: - description: generations are used to determine when an item needs - to be reconciled or has changed in a way that needs a reaction. - items: - description: GenerationStatus keeps track of the generation for - a given resource so that decisions about forced updates can be - made. - properties: - group: - description: group is the group of the thing you're tracking - type: string - hash: - description: hash is an optional field set for resources without - generation that are content sensitive like secrets and configmaps - type: string - lastGeneration: - description: lastGeneration is the last generation of the workload - controller involved - format: int64 - type: integer - name: - description: name is the name of the thing you're tracking - type: string - namespace: - description: namespace is where the thing you're tracking is - type: string - resource: - description: resource is the resource type of the thing you're - tracking - type: string - type: object - type: array - latestAvailableRevision: - description: latestAvailableRevision is the deploymentID of the most - recent deployment - format: int32 - type: integer - latestAvailableRevisionReason: - description: latestAvailableRevisionReason describe the detailed reason - for the most recent deployment - type: string - nodeStatuses: - description: nodeStatuses track the deployment values and errors across - individual nodes - items: - description: NodeStatus provides information about the current state - of a particular node managed by this operator. - properties: - currentRevision: - description: currentRevision is the generation of the most recently - successful deployment - format: int32 - type: integer - lastFailedCount: - description: lastFailedCount is how often the installer pod - of the last failed revision failed. - type: integer - lastFailedReason: - description: lastFailedReason is a machine readable failure - reason string. - type: string - lastFailedRevision: - description: lastFailedRevision is the generation of the deployment - we tried and failed to deploy. - format: int32 - type: integer - lastFailedRevisionErrors: - description: lastFailedRevisionErrors is a list of human readable - errors during the failed deployment referenced in lastFailedRevision. - items: - type: string - type: array - lastFailedTime: - description: lastFailedTime is the time the last failed revision - failed the last time. - format: date-time - type: string - lastFallbackCount: - description: lastFallbackCount is how often a fallback to a - previous revision happened. 
- type: integer - nodeName: - description: nodeName is the name of the node - type: string - targetRevision: - description: targetRevision is the generation of the deployment - we're trying to apply - format: int32 - type: integer - type: object - type: array - observedGeneration: - description: observedGeneration is the last generation change you've - dealt with - format: int64 - type: integer - readyReplicas: - description: readyReplicas indicates how many replicas are ready and - at the desired state - format: int32 - type: integer - version: - description: version is the level this availability applies to - type: string - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml-patch b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml-patch deleted file mode 100644 index 8145f00c4..000000000 --- a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml-patch +++ /dev/null @@ -1,3 +0,0 @@ -- op: replace - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/managementState/pattern - value: "^(Managed|Force)$" diff --git a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml deleted file mode 100644 index 1efccbea9..000000000 --- a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml +++ /dev/null @@ -1,244 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/475 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: kubeschedulers.operator.openshift.io -spec: - group: operator.openshift.io - names: - categories: - - coreoperators - kind: KubeScheduler - plural: kubeschedulers - singular: kubescheduler - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "KubeScheduler provides information to configure an operator - to manage scheduler. \n Compatibility level 1: Stable within a major release - for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec is the specification of the desired behavior of the - Kubernetes Scheduler - properties: - failedRevisionLimit: - description: failedRevisionLimit is the number of failed static pod - installer revisions to keep on disk and in the api -1 = unlimited, - 0 or unset = 5 (default) - format: int32 - type: integer - forceRedeploymentReason: - description: forceRedeploymentReason can be used to force the redeployment - of the operand by providing a unique string. This provides a mechanism - to kick a previously failed deployment and provide a reason why - you think it will work this time instead of failing again on the - same config. - type: string - logLevel: - default: Normal - description: "logLevel is an intent based logging for an overall component. - \ It does not give fine grained control, but it is a simple way - to manage coarse grained logging choices that operators have to - interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", - \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - type: string - managementState: - description: managementState indicates whether and how the operator - should manage the component - pattern: ^(Managed|Force)$ - type: string - observedConfig: - description: observedConfig holds a sparse config that controller - has observed from the cluster state. It exists in spec because - it is an input to the level for the operator - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - operatorLogLevel: - default: Normal - description: "operatorLogLevel is an intent based logging for the - operator itself. It does not give fine grained control, but it - is a simple way to manage coarse grained logging choices that operators - have to interpret for themselves. \n Valid values are: \"Normal\", - \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - type: string - succeededRevisionLimit: - description: succeededRevisionLimit is the number of successful static - pod installer revisions to keep on disk and in the api -1 = unlimited, - 0 or unset = 5 (default) - format: int32 - type: integer - unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that - will override any previously set options. It only needs to be the - fields to override it will end up overlaying in the following order: - 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - status: - description: status is the most recently observed status of the Kubernetes - Scheduler - properties: - conditions: - description: conditions is a list of conditions and their status - items: - description: OperatorCondition is just the standard condition fields. - properties: - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - type: object - type: array - generations: - description: generations are used to determine when an item needs - to be reconciled or has changed in a way that needs a reaction. 
- items: - description: GenerationStatus keeps track of the generation for - a given resource so that decisions about forced updates can be - made. - properties: - group: - description: group is the group of the thing you're tracking - type: string - hash: - description: hash is an optional field set for resources without - generation that are content sensitive like secrets and configmaps - type: string - lastGeneration: - description: lastGeneration is the last generation of the workload - controller involved - format: int64 - type: integer - name: - description: name is the name of the thing you're tracking - type: string - namespace: - description: namespace is where the thing you're tracking is - type: string - resource: - description: resource is the resource type of the thing you're - tracking - type: string - type: object - type: array - latestAvailableRevision: - description: latestAvailableRevision is the deploymentID of the most - recent deployment - format: int32 - type: integer - latestAvailableRevisionReason: - description: latestAvailableRevisionReason describe the detailed reason - for the most recent deployment - type: string - nodeStatuses: - description: nodeStatuses track the deployment values and errors across - individual nodes - items: - description: NodeStatus provides information about the current state - of a particular node managed by this operator. - properties: - currentRevision: - description: currentRevision is the generation of the most recently - successful deployment - format: int32 - type: integer - lastFailedCount: - description: lastFailedCount is how often the installer pod - of the last failed revision failed. - type: integer - lastFailedReason: - description: lastFailedReason is a machine readable failure - reason string. - type: string - lastFailedRevision: - description: lastFailedRevision is the generation of the deployment - we tried and failed to deploy. - format: int32 - type: integer - lastFailedRevisionErrors: - description: lastFailedRevisionErrors is a list of human readable - errors during the failed deployment referenced in lastFailedRevision. - items: - type: string - type: array - lastFailedTime: - description: lastFailedTime is the time the last failed revision - failed the last time. - format: date-time - type: string - lastFallbackCount: - description: lastFallbackCount is how often a fallback to a - previous revision happened. 
- type: integer - nodeName: - description: nodeName is the name of the node - type: string - targetRevision: - description: targetRevision is the generation of the deployment - we're trying to apply - format: int32 - type: integer - type: object - type: array - observedGeneration: - description: observedGeneration is the last generation change you've - dealt with - format: int64 - type: integer - readyReplicas: - description: readyReplicas indicates how many replicas are ready and - at the desired state - format: int32 - type: integer - version: - description: version is the level this availability applies to - type: string - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml-patch b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml-patch deleted file mode 100644 index 8145f00c4..000000000 --- a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml-patch +++ /dev/null @@ -1,3 +0,0 @@ -- op: replace - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/managementState/pattern - value: "^(Managed|Force)$" diff --git a/vendor/github.com/openshift/api/operator/v1/0000_30_openshift-apiserver-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_30_openshift-apiserver-operator_01_config.crd.yaml deleted file mode 100644 index 937718b77..000000000 --- a/vendor/github.com/openshift/api/operator/v1/0000_30_openshift-apiserver-operator_01_config.crd.yaml +++ /dev/null @@ -1,141 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/475 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: openshiftapiservers.operator.openshift.io -spec: - group: operator.openshift.io - names: - categories: - - coreoperators - kind: OpenShiftAPIServer - plural: openshiftapiservers - singular: openshiftapiserver - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "OpenShiftAPIServer provides information to configure an operator to manage openshift-apiserver. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec is the specification of the desired behavior of the OpenShift API Server. - type: object - properties: - logLevel: - description: "logLevel is an intent based logging for an overall component. 
It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - type: string - default: Normal - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - managementState: - description: managementState indicates whether and how the operator should manage the component - type: string - pattern: ^(Managed|Unmanaged|Force|Removed)$ - observedConfig: - description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - operatorLogLevel: - description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - type: string - default: Normal - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - status: - description: status defines the observed status of the OpenShift API Server. - type: object - properties: - conditions: - description: conditions is a list of conditions and their status - type: array - items: - description: OperatorCondition is just the standard condition fields. - type: object - properties: - lastTransitionTime: - type: string - format: date-time - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - generations: - description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. - type: array - items: - description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. - type: object - properties: - group: - description: group is the group of the thing you're tracking - type: string - hash: - description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps - type: string - lastGeneration: - description: lastGeneration is the last generation of the workload controller involved - type: integer - format: int64 - name: - description: name is the name of the thing you're tracking - type: string - namespace: - description: namespace is where the thing you're tracking is - type: string - resource: - description: resource is the resource type of the thing you're tracking - type: string - latestAvailableRevision: - description: latestAvailableRevision is the latest revision used as suffix of revisioned secrets like encryption-config. A new revision causes a new deployment of pods. 
- type: integer - format: int32 - minimum: 0 - observedGeneration: - description: observedGeneration is the last generation change you've dealt with - type: integer - format: int64 - readyReplicas: - description: readyReplicas indicates how many replicas are ready and at the desired state - type: integer - format: int32 - version: - description: version is the level this availability applies to - type: string - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_40_cloud-credential-operator_00_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_40_cloud-credential-operator_00_config.crd.yaml deleted file mode 100644 index 360765c3b..000000000 --- a/vendor/github.com/openshift/api/operator/v1/0000_40_cloud-credential-operator_00_config.crd.yaml +++ /dev/null @@ -1,142 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/692 - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: cloudcredentials.operator.openshift.io -spec: - group: operator.openshift.io - names: - kind: CloudCredential - listKind: CloudCredentialList - plural: cloudcredentials - singular: cloudcredential - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "CloudCredential provides a means to configure an operator to manage CredentialsRequests. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: CloudCredentialSpec is the specification of the desired behavior of the cloud-credential-operator. - type: object - properties: - credentialsMode: - description: 'CredentialsMode allows informing CCO that it should not attempt to dynamically determine the root cloud credentials capabilities, and it should just run in the specified mode. It also allows putting the operator into "manual" mode if desired. Leaving the field in default mode runs CCO so that the cluster''s cloud credentials will be dynamically probed for capabilities (on supported clouds/platforms). Supported modes: AWS/Azure/GCP: "" (Default), "Mint", "Passthrough", "Manual" Others: Do not set value as other platforms only support running in "Passthrough"' - type: string - enum: - - "" - - Manual - - Mint - - Passthrough - logLevel: - description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". 
Defaults to \"Normal\"." - type: string - default: Normal - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - managementState: - description: managementState indicates whether and how the operator should manage the component - type: string - pattern: ^(Managed|Unmanaged|Force|Removed)$ - observedConfig: - description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - operatorLogLevel: - description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - type: string - default: Normal - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - status: - description: CloudCredentialStatus defines the observed status of the cloud-credential-operator. - type: object - properties: - conditions: - description: conditions is a list of conditions and their status - type: array - items: - description: OperatorCondition is just the standard condition fields. - type: object - properties: - lastTransitionTime: - type: string - format: date-time - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - generations: - description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. - type: array - items: - description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
- type: object - properties: - group: - description: group is the group of the thing you're tracking - type: string - hash: - description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps - type: string - lastGeneration: - description: lastGeneration is the last generation of the workload controller involved - type: integer - format: int64 - name: - description: name is the name of the thing you're tracking - type: string - namespace: - description: namespace is where the thing you're tracking is - type: string - resource: - description: resource is the resource type of the thing you're tracking - type: string - observedGeneration: - description: observedGeneration is the last generation change you've dealt with - type: integer - format: int64 - readyReplicas: - description: readyReplicas indicates how many replicas are ready and at the desired state - type: integer - format: int32 - version: - description: version is the level this availability applies to - type: string - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_40_kube-storage-version-migrator-operator_00_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_40_kube-storage-version-migrator-operator_00_config.crd.yaml deleted file mode 100644 index befa175b7..000000000 --- a/vendor/github.com/openshift/api/operator/v1/0000_40_kube-storage-version-migrator-operator_00_config.crd.yaml +++ /dev/null @@ -1,133 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/503 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: kubestorageversionmigrators.operator.openshift.io -spec: - group: operator.openshift.io - names: - kind: KubeStorageVersionMigrator - listKind: KubeStorageVersionMigratorList - plural: kubestorageversionmigrators - singular: kubestorageversionmigrator - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "KubeStorageVersionMigrator provides information to configure an operator to manage kube-storage-version-migrator. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - type: object - properties: - logLevel: - description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. 
\n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - type: string - default: Normal - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - managementState: - description: managementState indicates whether and how the operator should manage the component - type: string - pattern: ^(Managed|Unmanaged|Force|Removed)$ - observedConfig: - description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - operatorLogLevel: - description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - type: string - default: Normal - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - status: - type: object - properties: - conditions: - description: conditions is a list of conditions and their status - type: array - items: - description: OperatorCondition is just the standard condition fields. - type: object - properties: - lastTransitionTime: - type: string - format: date-time - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - generations: - description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. - type: array - items: - description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
- type: object - properties: - group: - description: group is the group of the thing you're tracking - type: string - hash: - description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps - type: string - lastGeneration: - description: lastGeneration is the last generation of the workload controller involved - type: integer - format: int64 - name: - description: name is the name of the thing you're tracking - type: string - namespace: - description: namespace is where the thing you're tracking is - type: string - resource: - description: resource is the resource type of the thing you're tracking - type: string - observedGeneration: - description: observedGeneration is the last generation change you've dealt with - type: integer - format: int64 - readyReplicas: - description: readyReplicas indicates how many replicas are ready and at the desired state - type: integer - format: int32 - version: - description: version is the level this availability applies to - type: string - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-authentication-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-authentication-operator_01_config.crd.yaml deleted file mode 100644 index 1efa2d46e..000000000 --- a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-authentication-operator_01_config.crd.yaml +++ /dev/null @@ -1,140 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/475 - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: authentications.operator.openshift.io -spec: - group: operator.openshift.io - names: - kind: Authentication - plural: authentications - singular: authentication - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Authentication provides information to configure an operator to manage authentication. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - type: object - properties: - logLevel: - description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." 
- type: string - default: Normal - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - managementState: - description: managementState indicates whether and how the operator should manage the component - type: string - pattern: ^(Managed|Unmanaged|Force|Removed)$ - observedConfig: - description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - operatorLogLevel: - description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - type: string - default: Normal - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - status: - type: object - properties: - conditions: - description: conditions is a list of conditions and their status - type: array - items: - description: OperatorCondition is just the standard condition fields. - type: object - properties: - lastTransitionTime: - type: string - format: date-time - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - generations: - description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. - type: array - items: - description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. - type: object - properties: - group: - description: group is the group of the thing you're tracking - type: string - hash: - description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps - type: string - lastGeneration: - description: lastGeneration is the last generation of the workload controller involved - type: integer - format: int64 - name: - description: name is the name of the thing you're tracking - type: string - namespace: - description: namespace is where the thing you're tracking is - type: string - resource: - description: resource is the resource type of the thing you're tracking - type: string - oauthAPIServer: - description: OAuthAPIServer holds status specific only to oauth-apiserver - type: object - properties: - latestAvailableRevision: - description: LatestAvailableRevision is the latest revision used as suffix of revisioned secrets like encryption-config. A new revision causes a new deployment of pods. 
- type: integer - format: int32 - minimum: 0 - observedGeneration: - description: observedGeneration is the last generation change you've dealt with - type: integer - format: int64 - readyReplicas: - description: readyReplicas indicates how many replicas are ready and at the desired state - type: integer - format: int32 - version: - description: version is the level this availability applies to - type: string - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-openshift-controller-manager-operator_02_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-openshift-controller-manager-operator_02_config.crd.yaml deleted file mode 100644 index 64b1e93ba..000000000 --- a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-openshift-controller-manager-operator_02_config.crd.yaml +++ /dev/null @@ -1,134 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/475 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: openshiftcontrollermanagers.operator.openshift.io -spec: - group: operator.openshift.io - names: - categories: - - coreoperators - kind: OpenShiftControllerManager - plural: openshiftcontrollermanagers - singular: openshiftcontrollermanager - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "OpenShiftControllerManager provides information to configure an operator to manage openshift-controller-manager. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - type: object - properties: - logLevel: - description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - type: string - default: Normal - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - managementState: - description: managementState indicates whether and how the operator should manage the component - type: string - pattern: ^(Managed|Unmanaged|Force|Removed)$ - observedConfig: - description: observedConfig holds a sparse config that controller has observed from the cluster state. 
It exists in spec because it is an input to the level for the operator - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - operatorLogLevel: - description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - type: string - default: Normal - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - status: - type: object - properties: - conditions: - description: conditions is a list of conditions and their status - type: array - items: - description: OperatorCondition is just the standard condition fields. - type: object - properties: - lastTransitionTime: - type: string - format: date-time - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - generations: - description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. - type: array - items: - description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. - type: object - properties: - group: - description: group is the group of the thing you're tracking - type: string - hash: - description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps - type: string - lastGeneration: - description: lastGeneration is the last generation of the workload controller involved - type: integer - format: int64 - name: - description: name is the name of the thing you're tracking - type: string - namespace: - description: namespace is where the thing you're tracking is - type: string - resource: - description: resource is the resource type of the thing you're tracking - type: string - observedGeneration: - description: observedGeneration is the last generation change you've dealt with - type: integer - format: int64 - readyReplicas: - description: readyReplicas indicates how many replicas are ready and at the desired state - type: integer - format: int32 - version: - description: version is the level this availability applies to - type: string - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster_storage_operator_01_crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster_storage_operator_01_crd.yaml deleted file mode 100644 index 2bf181862..000000000 --- a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster_storage_operator_01_crd.yaml +++ /dev/null @@ -1,134 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/670 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: 
storages.operator.openshift.io -spec: - group: operator.openshift.io - names: - kind: Storage - plural: storages - singular: storage - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Storage provides a means to configure an operator to manage the cluster storage operator. `cluster` is the canonical name. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - type: object - properties: - logLevel: - description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - type: string - default: Normal - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - managementState: - description: managementState indicates whether and how the operator should manage the component - type: string - pattern: ^(Managed|Unmanaged|Force|Removed)$ - observedConfig: - description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - operatorLogLevel: - description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - type: string - default: Normal - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - status: - description: status holds observed values from the cluster. They may not be overridden. - type: object - properties: - conditions: - description: conditions is a list of conditions and their status - type: array - items: - description: OperatorCondition is just the standard condition fields. 
- type: object - properties: - lastTransitionTime: - type: string - format: date-time - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - generations: - description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. - type: array - items: - description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. - type: object - properties: - group: - description: group is the group of the thing you're tracking - type: string - hash: - description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps - type: string - lastGeneration: - description: lastGeneration is the last generation of the workload controller involved - type: integer - format: int64 - name: - description: name is the name of the thing you're tracking - type: string - namespace: - description: namespace is where the thing you're tracking is - type: string - resource: - description: resource is the resource type of the thing you're tracking - type: string - observedGeneration: - description: observedGeneration is the last generation change you've dealt with - type: integer - format: int64 - readyReplicas: - description: readyReplicas indicates how many replicas are ready and at the desired state - type: integer - format: int32 - version: - description: version is the level this availability applies to - type: string - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml deleted file mode 100644 index 0ac4a02d1..000000000 --- a/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml +++ /dev/null @@ -1,1746 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/616 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: ingresscontrollers.operator.openshift.io -spec: - group: operator.openshift.io - names: - kind: IngressController - listKind: IngressControllerList - plural: ingresscontrollers - singular: ingresscontroller - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "IngressController describes a managed ingress controller for - the cluster. The controller can service OpenShift Route and Kubernetes Ingress - resources. \n When an IngressController is created, a new ingress controller - deployment is created to allow external traffic to reach the services that - expose Ingress or Route resources. Updating this resource may lead to disruption - for public facing network connections as a new ingress controller revision - may be rolled out. \n https://kubernetes.io/docs/concepts/services-networking/ingress-controllers - \n Whenever possible, sensible defaults for the platform are used. See each - field for more details. \n Compatibility level 1: Stable within a major - release for a minimum of 12 months or 3 minor releases (whichever is longer)." 
- properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec is the specification of the desired behavior of the - IngressController. - properties: - clientTLS: - description: clientTLS specifies settings for requesting and verifying - client certificates, which can be used to enable mutual TLS for - edge-terminated and reencrypt routes. - properties: - allowedSubjectPatterns: - description: allowedSubjectPatterns specifies a list of regular - expressions that should be matched against the distinguished - name on a valid client certificate to filter requests. The - regular expressions must use PCRE syntax. If this list is empty, - no filtering is performed. If the list is nonempty, then at - least one pattern must match a client certificate's distinguished - name or else the ingress controller rejects the certificate - and denies the connection. - items: - type: string - type: array - x-kubernetes-list-type: atomic - clientCA: - description: clientCA specifies a configmap containing the PEM-encoded - CA certificate bundle that should be used to verify a client's - certificate. The administrator must create this configmap in - the openshift-config namespace. - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - clientCertificatePolicy: - description: "clientCertificatePolicy specifies whether the ingress - controller requires clients to provide certificates. This field - accepts the values \"Required\" or \"Optional\". \n Note that - the ingress controller only checks client certificates for edge-terminated - and reencrypt TLS routes; it cannot check certificates for cleartext - HTTP or passthrough TLS routes." - enum: - - "" - - Required - - Optional - type: string - required: - - clientCA - - clientCertificatePolicy - type: object - defaultCertificate: - description: "defaultCertificate is a reference to a secret containing - the default certificate served by the ingress controller. When Routes - don't specify their own certificate, defaultCertificate is used. - \n The secret must contain the following keys and data: \n tls.crt: - certificate file contents tls.key: key file contents \n If unset, - a wildcard certificate is automatically generated and used. The - certificate is valid for the ingress controller domain (and subdomains) - and the generated certificate's CA will be automatically integrated - with the cluster's trust store. \n If a wildcard certificate is - used and shared by multiple HTTP/2 enabled routes (which implies - ALPN) then clients (i.e., notably browsers) are at liberty to reuse - open connections. This means a client can reuse a connection to - another route and that is likely to fail. This behaviour is generally - known as connection coalescing. 
\n The in-use certificate (whether - generated or user-specified) will be automatically integrated with - OpenShift's built-in OAuth server." - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - domain: - description: "domain is a DNS name serviced by the ingress controller - and is used to configure multiple features: \n * For the LoadBalancerService - endpoint publishing strategy, domain is used to configure DNS - records. See endpointPublishingStrategy. \n * When using a generated - default certificate, the certificate will be valid for domain - and its subdomains. See defaultCertificate. \n * The value is published - to individual Route statuses so that end-users know where to target - external DNS records. \n domain must be unique among all IngressControllers, - and cannot be updated. \n If empty, defaults to ingress.config.openshift.io/cluster - .spec.domain." - type: string - endpointPublishingStrategy: - description: "endpointPublishingStrategy is used to publish the ingress - controller endpoints to other networks, enable load balancer integrations, - etc. \n If unset, the default is based on infrastructure.config.openshift.io/cluster - .status.platform: \n AWS: LoadBalancerService (with External - scope) Azure: LoadBalancerService (with External scope) - \ GCP: LoadBalancerService (with External scope) IBMCloud: - \ LoadBalancerService (with External scope) AlibabaCloud: LoadBalancerService - (with External scope) Libvirt: HostNetwork \n Any other platform - types (including None) default to HostNetwork. \n endpointPublishingStrategy - cannot be updated." - properties: - hostNetwork: - description: hostNetwork holds parameters for the HostNetwork - endpoint publishing strategy. Present only if type is HostNetwork. - properties: - httpPort: - default: 80 - description: httpPort is the port on the host which should - be used to listen for HTTP requests. This field should be - set when port 80 is already in use. The value should not - coincide with the NodePort range of the cluster. When the - value is 0 or is not specified it defaults to 80. - format: int32 - maximum: 65535 - minimum: 0 - type: integer - httpsPort: - default: 443 - description: httpsPort is the port on the host which should - be used to listen for HTTPS requests. This field should - be set when port 443 is already in use. The value should - not coincide with the NodePort range of the cluster. When - the value is 0 or is not specified it defaults to 443. - format: int32 - maximum: 65535 - minimum: 0 - type: integer - protocol: - description: "protocol specifies whether the IngressController - expects incoming connections to use plain TCP or whether - the IngressController expects PROXY protocol. \n PROXY protocol - can be used with load balancers that support it to communicate - the source addresses of client connections when forwarding - those connections to the IngressController. Using PROXY - protocol enables the IngressController to report those source - addresses instead of reporting the load balancer's address - in HTTP headers and logs. Note that enabling PROXY protocol - on the IngressController will cause connections to fail - if you are not using a load balancer that uses PROXY protocol - to forward connections to the IngressController. 
See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt - for information about PROXY protocol. \n The following values - are valid for this field: \n * The empty string. * \"TCP\". - * \"PROXY\". \n The empty string specifies the default, - which is TCP without PROXY protocol. Note that the default - is subject to change." - enum: - - "" - - TCP - - PROXY - type: string - statsPort: - default: 1936 - description: statsPort is the port on the host where the stats - from the router are published. The value should not coincide - with the NodePort range of the cluster. If an external load - balancer is configured to forward connections to this IngressController, - the load balancer should use this port for health checks. - The load balancer can send HTTP probes on this port on a - given node, with the path /healthz/ready to determine if - the ingress controller is ready to receive traffic on the - node. For proper operation the load balancer must not forward - traffic to a node until the health check reports ready. - The load balancer should also stop forwarding requests within - a maximum of 45 seconds after /healthz/ready starts reporting - not-ready. Probing every 5 to 10 seconds, with a 5-second - timeout and with a threshold of two successful or failed - requests to become healthy or unhealthy respectively, are - well-tested values. When the value is 0 or is not specified - it defaults to 1936. - format: int32 - maximum: 65535 - minimum: 0 - type: integer - type: object - loadBalancer: - description: loadBalancer holds parameters for the load balancer. - Present only if type is LoadBalancerService. - properties: - providerParameters: - description: "providerParameters holds desired load balancer - information specific to the underlying infrastructure provider. - \n If empty, defaults will be applied. See specific providerParameters - fields for details about their defaults." - properties: - aws: - description: "aws provides configuration settings that - are specific to AWS load balancers. \n If empty, defaults - will be applied. See specific aws fields for details - about their defaults." - properties: - classicLoadBalancer: - description: classicLoadBalancerParameters holds configuration - parameters for an AWS classic load balancer. Present - only if type is Classic. - properties: - connectionIdleTimeout: - description: connectionIdleTimeout specifies the - maximum time period that a connection may be - idle before the load balancer closes the connection. The - value must be parseable as a time duration value; - see https://pkg.go.dev/time#ParseDuration. A - nil or zero value means no opinion, in which - case a default value is used. The default value - for this field is 60s. This default is subject - to change. - format: duration - type: string - type: object - networkLoadBalancer: - description: networkLoadBalancerParameters holds configuration - parameters for an AWS network load balancer. Present - only if type is NLB. - type: object - type: - description: "type is the type of AWS load balancer - to instantiate for an ingresscontroller. \n Valid - values are: \n * \"Classic\": A Classic Load Balancer - that makes routing decisions at either the transport - layer (TCP/SSL) or the application layer (HTTP/HTTPS). - See the following for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb - \n * \"NLB\": A Network Load Balancer that makes - routing decisions at the transport layer (TCP/SSL).
- See the following for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb" - enum: - - Classic - - NLB - type: string - required: - - type - type: object - gcp: - description: "gcp provides configuration settings that - are specific to GCP load balancers. \n If empty, defaults - will be applied. See specific gcp fields for details - about their defaults." - properties: - clientAccess: - description: "clientAccess describes how client access - is restricted for internal load balancers. \n Valid - values are: * \"Global\": Specifying an internal - load balancer with Global client access allows - clients from any region within the VPC to communicate - with the load balancer. \n https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#global_access - \n * \"Local\": Specifying an internal load balancer - with Local client access means only clients within - the same region (and VPC) as the GCP load balancer - \ can communicate with the load balancer. Note - that this is the default behavior. \n https://cloud.google.com/load-balancing/docs/internal#client_access" - enum: - - Global - - Local - type: string - type: object - type: - description: type is the underlying infrastructure provider - for the load balancer. Allowed values are "AWS", "Azure", - "BareMetal", "GCP", "Nutanix", "OpenStack", and "VSphere". - enum: - - AWS - - Azure - - BareMetal - - GCP - - Nutanix - - OpenStack - - VSphere - - IBM - type: string - required: - - type - type: object - scope: - description: scope indicates the scope at which the load balancer - is exposed. Possible values are "External" and "Internal". - enum: - - Internal - - External - type: string - required: - - scope - type: object - nodePort: - description: nodePort holds parameters for the NodePortService - endpoint publishing strategy. Present only if type is NodePortService. - properties: - protocol: - description: "protocol specifies whether the IngressController - expects incoming connections to use plain TCP or whether - the IngressController expects PROXY protocol. \n PROXY protocol - can be used with load balancers that support it to communicate - the source addresses of client connections when forwarding - those connections to the IngressController. Using PROXY - protocol enables the IngressController to report those source - addresses instead of reporting the load balancer's address - in HTTP headers and logs. Note that enabling PROXY protocol - on the IngressController will cause connections to fail - if you are not using a load balancer that uses PROXY protocol - to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt - for information about PROXY protocol. \n The following values - are valid for this field: \n * The empty string. * \"TCP\". - * \"PROXY\". \n The empty string specifies the default, - which is TCP without PROXY protocol. Note that the default - is subject to change." - enum: - - "" - - TCP - - PROXY - type: string - type: object - private: - description: private holds parameters for the Private endpoint - publishing strategy. Present only if type is Private. - type: object - type: - description: "type is the publishing strategy to use. Valid values - are: \n * LoadBalancerService \n Publishes the ingress controller - using a Kubernetes LoadBalancer Service. \n In this configuration, - the ingress controller deployment uses container networking. 
- A LoadBalancer Service is created to publish the deployment. - \n See: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer - \n If domain is set, a wildcard DNS record will be managed to - point at the LoadBalancer Service's external name. DNS records - are managed only in DNS zones defined by dns.config.openshift.io/cluster - .spec.publicZone and .spec.privateZone. \n Wildcard DNS management - is currently supported only on the AWS, Azure, and GCP platforms. - \n * HostNetwork \n Publishes the ingress controller on node - ports where the ingress controller is deployed. \n In this configuration, - the ingress controller deployment uses host networking, bound - to node ports 80 and 443. The user is responsible for configuring - an external load balancer to publish the ingress controller - via the node ports. \n * Private \n Does not publish the ingress - controller. \n In this configuration, the ingress controller - deployment uses container networking, and is not explicitly - published. The user must manually publish the ingress controller. - \n * NodePortService \n Publishes the ingress controller using - a Kubernetes NodePort Service. \n In this configuration, the - ingress controller deployment uses container networking. A NodePort - Service is created to publish the deployment. The specific node - ports are dynamically allocated by OpenShift; however, to support - static port allocations, user changes to the node port field - of the managed NodePort Service will be preserved." - enum: - - LoadBalancerService - - HostNetwork - - Private - - NodePortService - type: string - required: - - type - type: object - httpCompression: - description: httpCompression defines a policy for HTTP traffic compression. - By default, there is no HTTP compression. - properties: - mimeTypes: - description: "mimeTypes is a list of MIME types that should have - compression applied. This list can be empty, in which case the - ingress controller does not apply compression. \n Note: Not - all MIME types benefit from compression, but HAProxy will still - use resources to try to compress if instructed to. Generally - speaking, text (html, css, js, etc.) formats benefit from compression, - but formats that are already compressed (image, audio, video, - etc.) benefit little in exchange for the time and cpu spent - on compressing again. See https://joehonton.medium.com/the-gzip-penalty-d31bd697f1a2" - items: - description: "CompressionMIMEType defines the format of a single - MIME type. E.g. \"text/css; charset=utf-8\", \"text/html\", - \"text/*\", \"image/svg+xml\", \"application/octet-stream\", - \"X-custom/customsub\", etc. \n The format should follow the - Content-Type definition in RFC 1341: Content-Type := type - \"/\" subtype *[\";\" parameter] - The type in Content-Type - can be one of: application, audio, image, message, multipart, - text, video, or a custom type preceded by \"X-\" and followed - by a token as defined below. - The token is a string of at - least one character, and not containing white space, control - characters, or any of the characters in the tspecials set. - - The tspecials set contains the characters ()<>@,;:\\\"/[]?.= - - The subtype in Content-Type is also a token. - The optional - parameter/s following the subtype are defined as: token - \"=\" (token / quoted-string) - The quoted-string, as defined - in RFC 822, is surrounded by double quotes and can contain - white space plus any character EXCEPT \\, \", and CR.
It - can also contain any single ASCII character as long as it - is escaped by \\." - pattern: ^(?i)(x-[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+|application|audio|image|message|multipart|text|video)/[^][ - ()\\<>@,;:"/?.=\x00-\x1F\x7F]+(; *[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+=([^][ - ()\\<>@,;:"/?.=\x00-\x1F\x7F]+|"(\\[\x00-\x7F]|[^\x0D"\\])*"))*$ - type: string - type: array - x-kubernetes-list-type: set - type: object - httpEmptyRequestsPolicy: - default: Respond - description: "httpEmptyRequestsPolicy describes how HTTP connections - should be handled if the connection times out before a request is - received. Allowed values for this field are \"Respond\" and \"Ignore\". - \ If the field is set to \"Respond\", the ingress controller sends - an HTTP 400 or 408 response, logs the connection (if access logging - is enabled), and counts the connection in the appropriate metrics. - \ If the field is set to \"Ignore\", the ingress controller closes - the connection without sending a response, logging the connection, - or incrementing metrics. The default value is \"Respond\". \n Typically, - these connections come from load balancers' health probes or Web - browsers' speculative connections (\"preconnect\") and can be safely - ignored. However, these requests may also be caused by network - errors, and so setting this field to \"Ignore\" may impede detection - and diagnosis of problems. In addition, these requests may be caused - by port scans, in which case logging empty requests may aid in detecting - intrusion attempts." - enum: - - Respond - - Ignore - type: string - httpErrorCodePages: - description: httpErrorCodePages specifies a configmap with custom - error pages. The administrator must create this configmap in the - openshift-config namespace. This configmap should have keys in the - format "error-page-<error code>.http", where <error code> is an - HTTP error code. For example, "error-page-503.http" defines an error - page for HTTP 503 responses. Currently only error pages for 503 - and 404 responses can be customized. Each value in the configmap - should be the full response, including HTTP headers. Eg- https://raw.githubusercontent.com/openshift/router/fadab45747a9b30cc3f0a4b41ad2871f95827a93/images/router/haproxy/conf/error-page-503.http - If this field is empty, the ingress controller uses the default - error pages. - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - httpHeaders: - description: "httpHeaders defines policy for HTTP headers. \n If this - field is empty, the default values are used." - properties: - forwardedHeaderPolicy: - description: "forwardedHeaderPolicy specifies when and how the - IngressController sets the Forwarded, X-Forwarded-For, X-Forwarded-Host, - X-Forwarded-Port, X-Forwarded-Proto, and X-Forwarded-Proto-Version - HTTP headers. The value may be one of the following: \n * \"Append\", - which specifies that the IngressController appends the headers, - preserving existing headers. \n * \"Replace\", which specifies - that the IngressController sets the headers, replacing any - existing Forwarded or X-Forwarded-* headers. \n * \"IfNone\", - which specifies that the IngressController sets the headers - if they are not already set. \n * \"Never\", which specifies - that the IngressController never sets the headers, preserving - any existing headers. \n By default, the policy is \"Append\"."
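For reference, a sketch of how this policy would typically have been set on the default IngressController (the resource name and namespace follow the usual OpenShift conventions and are assumptions, not taken from this schema):

    apiVersion: operator.openshift.io/v1
    kind: IngressController
    metadata:
      name: default
      namespace: openshift-ingress-operator
    spec:
      httpHeaders:
        forwardedHeaderPolicy: IfNone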
- enum: - - Append - - Replace - - IfNone - - Never - type: string - headerNameCaseAdjustments: - description: "headerNameCaseAdjustments specifies case adjustments - that can be applied to HTTP header names. Each adjustment is - specified as an HTTP header name with the desired capitalization. - \ For example, specifying \"X-Forwarded-For\" indicates that - the \"x-forwarded-for\" HTTP header should be adjusted to have - the specified capitalization. \n These adjustments are only - applied to cleartext, edge-terminated, and re-encrypt routes, - and only when using HTTP/1. \n For request headers, these adjustments - are applied only for routes that have the haproxy.router.openshift.io/h1-adjust-case=true - annotation. For response headers, these adjustments are applied - to all HTTP responses. \n If this field is empty, no request - headers are adjusted." - items: - description: IngressControllerHTTPHeaderNameCaseAdjustment is - the name of an HTTP header (for example, "X-Forwarded-For") - in the desired capitalization. The value must be a valid - HTTP header name as defined in RFC 2616 section 4.2. - maxLength: 1024 - minLength: 0 - pattern: ^$|^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ - type: string - nullable: true - type: array - uniqueId: - description: "uniqueId describes configuration for a custom HTTP - header that the ingress controller should inject into incoming - HTTP requests. Typically, this header is configured to have - a value that is unique to the HTTP request. The header can - be used by applications or included in access logs to facilitate - tracing individual HTTP requests. \n If this field is empty, - no such header is injected into requests." - properties: - format: - description: 'format specifies the format for the injected - HTTP header''s value. This field has no effect unless name - is specified. For the HAProxy-based ingress controller - implementation, this format uses the same syntax as the - HTTP log format. If the field is empty, the default value - is "%{+X}o\\ %ci:%cp_%fi:%fp_%Ts_%rt:%pid"; see the corresponding - HAProxy documentation: http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3' - maxLength: 1024 - minLength: 0 - pattern: ^(%(%|(\{[-+]?[QXE](,[-+]?[QXE])*\})?([A-Za-z]+|\[[.0-9A-Z_a-z]+(\([^)]+\))?(,[.0-9A-Z_a-z]+(\([^)]+\))?)*\]))|[^%[:cntrl:]])*$ - type: string - name: - description: name specifies the name of the HTTP header (for - example, "unique-id") that the ingress controller should - inject into HTTP requests. The field's value must be a - valid HTTP header name as defined in RFC 2616 section 4.2. If - the field is empty, no header is injected. - maxLength: 1024 - minLength: 0 - pattern: ^$|^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ - type: string - type: object - type: object - logging: - description: logging defines parameters for what should be logged - where. If this field is empty, operational logs are enabled but - access logs are disabled. - properties: - access: - description: "access describes how the client requests should - be logged. \n If this field is empty, access logging is disabled." - properties: - destination: - description: destination is where access logs go. - properties: - container: - description: container holds parameters for the Container - logging destination. Present only if type is Container. - type: object - syslog: - description: syslog holds parameters for a syslog endpoint. Present - only if type is Syslog. 
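As an illustration, routing access logs to a syslog receiver under this schema would have looked roughly as follows (a sketch; the address and port are placeholders, and the address must satisfy the ipv4/ipv6 oneOf constraint that follows):

    spec:
      logging:
        access:
          destination:
            type: Syslog
            syslog:
              address: 192.0.2.10  # placeholder receiver address
              port: 514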
- oneOf: - - properties: - address: - format: ipv4 - - properties: - address: - format: ipv6 - properties: - address: - description: address is the IP address of the syslog - endpoint that receives log messages. - type: string - facility: - description: "facility specifies the syslog facility - of log messages. \n If this field is empty, the - facility is \"local1\"." - enum: - - kern - - user - - mail - - daemon - - auth - - syslog - - lpr - - news - - uucp - - cron - - auth2 - - ftp - - ntp - - audit - - alert - - cron2 - - local0 - - local1 - - local2 - - local3 - - local4 - - local5 - - local6 - - local7 - type: string - maxLength: - default: 1024 - description: "maxLength is the maximum length of the - syslog message \n If this field is empty, the maxLength - is set to \"1024\"." - format: int32 - maximum: 4096 - minimum: 480 - type: integer - port: - description: port is the UDP port number of the syslog - endpoint that receives log messages. - format: int32 - maximum: 65535 - minimum: 1 - type: integer - required: - - address - - port - type: object - type: - description: "type is the type of destination for logs. - \ It must be one of the following: \n * Container \n - The ingress operator configures the sidecar container - named \"logs\" on the ingress controller pod and configures - the ingress controller to write logs to the sidecar. - \ The logs are then available as container logs. The - expectation is that the administrator configures a custom - logging solution that reads logs from this sidecar. - \ Note that using container logs means that logs may - be dropped if the rate of logs exceeds the container - runtime's or the custom logging solution's capacity. - \n * Syslog \n Logs are sent to a syslog endpoint. The - administrator must specify an endpoint that can receive - syslog messages. The expectation is that the administrator - has configured a custom syslog instance." - enum: - - Container - - Syslog - type: string - required: - - type - type: object - httpCaptureCookies: - description: httpCaptureCookies specifies HTTP cookies that - should be captured in access logs. If this field is empty, - no cookies are captured. - items: - description: IngressControllerCaptureHTTPCookie describes - an HTTP cookie that should be captured. - properties: - matchType: - description: matchType specifies the type of match to - be performed on the cookie name. Allowed values are - "Exact" for an exact string match and "Prefix" for - a string prefix match. If "Exact" is specified, a - name must be specified in the name field. If "Prefix" - is provided, a prefix must be specified in the namePrefix - field. For example, specifying matchType "Prefix" - and namePrefix "foo" will capture a cookie named "foo" - or "foobar" but not one named "bar". The first matching - cookie is captured. - enum: - - Exact - - Prefix - type: string - maxLength: - description: maxLength specifies a maximum length of - the string that will be logged, which includes the - cookie name, cookie value, and one-character delimiter. If - the log entry exceeds this length, the value will - be truncated in the log message. Note that the ingress - controller may impose a separate bound on the total - length of HTTP headers in a request. - maximum: 1024 - minimum: 1 - type: integer - name: - description: name specifies a cookie name. Its value - must be a valid HTTP cookie name as defined in RFC - 6265 section 4.1. 
- maxLength: 1024 - minLength: 0 - pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]*$ - type: string - namePrefix: - description: namePrefix specifies a cookie name prefix. Its - value must be a valid HTTP cookie name as defined - in RFC 6265 section 4.1. - maxLength: 1024 - minLength: 0 - pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]*$ - type: string - required: - - matchType - - maxLength - type: object - maxItems: 1 - nullable: true - type: array - httpCaptureHeaders: - description: "httpCaptureHeaders defines HTTP headers that - should be captured in access logs. If this field is empty, - no headers are captured. \n Note that this option only applies - to cleartext HTTP connections and to secure HTTP connections - for which the ingress controller terminates encryption (that - is, edge-terminated or reencrypt connections). Headers - cannot be captured for TLS passthrough connections." - properties: - request: - description: "request specifies which HTTP request headers - to capture. \n If this field is empty, no request headers - are captured." - items: - description: IngressControllerCaptureHTTPHeader describes - an HTTP header that should be captured. - properties: - maxLength: - description: maxLength specifies a maximum length - for the header value. If a header value exceeds - this length, the value will be truncated in the - log message. Note that the ingress controller - may impose a separate bound on the total length - of HTTP headers in a request. - minimum: 1 - type: integer - name: - description: name specifies a header name. Its - value must be a valid HTTP header name as defined - in RFC 2616 section 4.2. - pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ - type: string - required: - - maxLength - - name - type: object - nullable: true - type: array - response: - description: "response specifies which HTTP response headers - to capture. \n If this field is empty, no response headers - are captured." - items: - description: IngressControllerCaptureHTTPHeader describes - an HTTP header that should be captured. - properties: - maxLength: - description: maxLength specifies a maximum length - for the header value. If a header value exceeds - this length, the value will be truncated in the - log message. Note that the ingress controller - may impose a separate bound on the total length - of HTTP headers in a request. - minimum: 1 - type: integer - name: - description: name specifies a header name. Its - value must be a valid HTTP header name as defined - in RFC 2616 section 4.2. - pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ - type: string - required: - - maxLength - - name - type: object - nullable: true - type: array - type: object - httpLogFormat: - description: "httpLogFormat specifies the format of the log - message for an HTTP request. \n If this field is empty, - log messages use the implementation's default HTTP log format. - \ For HAProxy's default HTTP log format, see the HAProxy - documentation: http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3 - \n Note that this format only applies to cleartext HTTP - connections and to secure HTTP connections for which the - ingress controller terminates encryption (that is, edge-terminated - or reencrypt connections). It does not affect the log format - for TLS passthrough connections." - type: string - logEmptyRequests: - default: Log - description: logEmptyRequests specifies how connections on - which no request is received should be logged. 
Typically, - these empty requests come from load balancers' health probes - or Web browsers' speculative connections ("preconnect"), - in which case logging these requests may be undesirable. However, - these requests may also be caused by network errors, in - which case logging empty requests may be useful for diagnosing - the errors. In addition, these requests may be caused by - port scans, in which case logging empty requests may aid - in detecting intrusion attempts. Allowed values for this - field are "Log" and "Ignore". The default value is "Log". - enum: - - Log - - Ignore - type: string - required: - - destination - type: object - type: object - namespaceSelector: - description: "namespaceSelector is used to filter the set of namespaces - serviced by the ingress controller. This is useful for implementing - shards. \n If unset, the default is no filtering." - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. - items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. - properties: - key: - description: key is the label key that the selector applies - to. - type: string - operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. - type: object - type: object - nodePlacement: - description: "nodePlacement enables explicit control over the scheduling - of the ingress controller. \n If unset, defaults are used. See NodePlacement - for more details." - properties: - nodeSelector: - description: "nodeSelector is the node selector applied to ingress - controller deployments. \n If set, the specified selector is - used and replaces the default. \n If unset, the default depends - on the value of the defaultPlacement field in the cluster config.openshift.io/v1/ingresses - status. \n When defaultPlacement is Workers, the default is: - \n kubernetes.io/os: linux node-role.kubernetes.io/worker: - '' \n When defaultPlacement is ControlPlane, the default is: - \n kubernetes.io/os: linux node-role.kubernetes.io/master: - '' \n These defaults are subject to change." - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. 
- type: string - values: - description: values is an array of string values. If - the operator is In or NotIn, the values array must - be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced - during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A - single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is "key", - the operator is "In", and the values array contains only - "value". The requirements are ANDed. - type: object - type: object - tolerations: - description: "tolerations is a list of tolerations applied to - ingress controller deployments. \n The default is an empty list. - \n See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/" - items: - description: The pod this Toleration is attached to tolerates - any taint that matches the triple <key,value,effect> using - the matching operator <operator>. - properties: - effect: - description: Effect indicates the taint effect to match. - Empty means match all taint effects. When specified, allowed - values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies - to. Empty means match all taint keys. If the key is empty, - operator must be Exists; this combination means to match - all values and all keys. - type: string - operator: - description: Operator represents a key's relationship to - the value. Valid operators are Exists and Equal. Defaults - to Equal. Exists is equivalent to wildcard for value, - so that a pod can tolerate all taints of a particular - category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of - time the toleration (which must be of effect NoExecute, - otherwise this field is ignored) tolerates the taint. - By default, it is not set, which means tolerate the taint - forever (do not evict). Zero and negative values will - be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches - to. If the operator is Exists, the value should be empty, - otherwise just a regular string. - type: string - type: object - type: array - type: object - replicas: - description: "replicas is the desired number of ingress controller - replicas. If unset, the default depends on the value of the defaultPlacement - field in the cluster config.openshift.io/v1/ingresses status. \n - The value of replicas is set based on the value of a chosen field - in the Infrastructure CR. If defaultPlacement is set to ControlPlane, - the chosen field will be controlPlaneTopology. If it is set to Workers - the chosen field will be infrastructureTopology. Replicas will then - be set to 1 or 2 based on whether the chosen field's value is SingleReplica - or HighlyAvailable, respectively. \n These defaults are subject - to change." - format: int32 - type: integer - routeAdmission: - description: "routeAdmission defines a policy for handling new route - claims (for example, to allow or deny claims across namespaces). - \n If empty, defaults will be applied. See specific routeAdmission - fields for details about their defaults."
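For example, permitting routes in different namespaces to claim paths of the same host under this schema would have been expressed roughly as (a sketch):

    spec:
      routeAdmission:
        namespaceOwnership: InterNamespaceAllowed
        wildcardPolicy: WildcardsDisallowed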
- properties: - namespaceOwnership: - description: "namespaceOwnership describes how host name claims - across namespaces should be handled. \n Value must be one of: - \n - Strict: Do not allow routes in different namespaces to - claim the same host. \n - InterNamespaceAllowed: Allow routes - to claim different paths of the same host name across namespaces. - \n If empty, the default is Strict." - enum: - - InterNamespaceAllowed - - Strict - type: string - wildcardPolicy: - description: "wildcardPolicy describes how routes with wildcard - policies should be handled for the ingress controller. WildcardPolicy - controls use of routes [1] exposed by the ingress controller - based on the route's wildcard policy. \n [1] https://github.com/openshift/api/blob/master/route/v1/types.go - \n Note: Updating WildcardPolicy from WildcardsAllowed to WildcardsDisallowed - will cause admitted routes with a wildcard policy of Subdomain - to stop working. These routes must be updated to a wildcard - policy of None to be readmitted by the ingress controller. \n - WildcardPolicy supports WildcardsAllowed and WildcardsDisallowed - values. \n If empty, defaults to \"WildcardsDisallowed\"." - enum: - - WildcardsAllowed - - WildcardsDisallowed - type: string - type: object - routeSelector: - description: "routeSelector is used to filter the set of Routes serviced - by the ingress controller. This is useful for implementing shards. - \n If unset, the default is no filtering." - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. - items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. - properties: - key: - description: key is the label key that the selector applies - to. - type: string - operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. - type: object - type: object - tlsSecurityProfile: - description: "tlsSecurityProfile specifies settings for TLS connections - for ingresscontrollers. \n If unset, the default is based on the - apiservers.config.openshift.io/cluster resource. \n Note that when - using the Old, Intermediate, and Modern profile types, the effective - profile configuration is subject to change between releases. For - example, given a specification to use the Intermediate profile deployed - on release X.Y.Z, an upgrade to release X.Y.Z+1 may cause a new - profile configuration to be applied to the ingress controller, resulting - in a rollout." - properties: - custom: - description: "custom is a user-defined TLS security profile. Be - extremely careful using a custom profile as invalid configurations - can be catastrophic. 
An example custom profile looks like this: - \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - \ - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - \ minTLSVersion: TLSv1.1" - nullable: true - properties: - ciphers: - description: "ciphers is used to specify the cipher algorithms - that are negotiated during the TLS handshake. Operators - may remove entries their operands do not support. For example, - to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" - items: - type: string - type: array - minTLSVersion: - description: "minTLSVersion is used to specify the minimal - version of the TLS protocol that is negotiated during the - TLS handshake. For example, to use TLS versions 1.1, 1.2 - and 1.3 (yaml): \n minTLSVersion: TLSv1.1 \n NOTE: currently - the highest minTLSVersion allowed is VersionTLS12" - enum: - - VersionTLS10 - - VersionTLS11 - - VersionTLS12 - - VersionTLS13 - type: string - type: object - intermediate: - description: "intermediate is a TLS security profile based on: - \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 - \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - \ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - \ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - \ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - \ - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 - \ minTLSVersion: TLSv1.2" - nullable: true - type: object - modern: - description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility - \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - \ minTLSVersion: TLSv1.3 \n NOTE: Currently unsupported." - nullable: true - type: object - old: - description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility - \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - \ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - \ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - \ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - \ - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 - \ - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - \ - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - \ - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - \ - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - \ - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - \ - AES128-SHA256 - AES256-SHA256 - AES128-SHA - - AES256-SHA - DES-CBC3-SHA minTLSVersion: TLSv1.0" - nullable: true - type: object - type: - description: "type is one of Old, Intermediate, Modern or Custom. - Custom provides the ability to specify individual TLS security - profile parameters. Old, Intermediate and Modern are TLS security - profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations - \n The profiles are intent based, so they may change over time - as new ciphers are developed and existing ciphers are found - to be insecure. Depending on precisely which ciphers are available - to a process, the list may be reduced. 
\n Note that the Modern - profile is currently not supported because it is not yet well - adopted by common software libraries." - enum: - - Old - - Intermediate - - Modern - - Custom - type: string - type: object - tuningOptions: - anyOf: - - properties: - maxConnections: - enum: - - -1 - - 0 - - properties: - maxConnections: - format: int32 - maximum: 2000000 - minimum: 2000 - description: "tuningOptions defines parameters for adjusting the performance - of ingress controller pods. All fields are optional and will use - their respective defaults if not set. See specific tuningOptions - fields for more details. \n Setting fields within tuningOptions - is generally not recommended. The default values are suitable for - most configurations." - properties: - clientFinTimeout: - description: "clientFinTimeout defines how long a connection will - be held open while waiting for the client response to the server/backend - closing the connection. \n If unset, the default timeout is - 1s" - format: duration - type: string - clientTimeout: - description: "clientTimeout defines how long a connection will - be held open while waiting for a client response. \n If unset, - the default timeout is 30s" - format: duration - type: string - headerBufferBytes: - description: "headerBufferBytes describes how much memory should - be reserved (in bytes) for IngressController connection sessions. - Note that this value must be at least 16384 if HTTP/2 is enabled - for the IngressController (https://tools.ietf.org/html/rfc7540). - If this field is empty, the IngressController will use a default - value of 32768 bytes. \n Setting this field is generally not - recommended as headerBufferBytes values that are too small may - break the IngressController and headerBufferBytes values that - are too large could cause the IngressController to use significantly - more memory than necessary." - format: int32 - minimum: 16384 - type: integer - headerBufferMaxRewriteBytes: - description: "headerBufferMaxRewriteBytes describes how much memory - should be reserved (in bytes) from headerBufferBytes for HTTP - header rewriting and appending for IngressController connection - sessions. Note that incoming HTTP requests will be limited to - (headerBufferBytes - headerBufferMaxRewriteBytes) bytes, meaning - headerBufferBytes must be greater than headerBufferMaxRewriteBytes. - If this field is empty, the IngressController will use a default - value of 8192 bytes. \n Setting this field is generally not - recommended as headerBufferMaxRewriteBytes values that are too - small may break the IngressController and headerBufferMaxRewriteBytes - values that are too large could cause the IngressController - to use significantly more memory than necessary." - format: int32 - minimum: 4096 - type: integer - healthCheckInterval: - description: "healthCheckInterval defines how long the router - waits between two consecutive health checks on its configured - backends. This value is applied globally as a default for all - routes, but may be overridden per-route by the route annotation - \"router.openshift.io/haproxy.health.check.interval\". \n Expects - an unsigned duration string of decimal numbers, each with optional - fraction and a unit suffix, eg \"300ms\", \"1.5h\" or \"2h45m\". - Valid time units are \"ns\", \"us\" (or \"µs\" U+00B5 or \"μs\" - U+03BC), \"ms\", \"s\", \"m\", \"h\". \n Setting this to less - than 5s can cause excess traffic due to too frequent TCP health - checks and accompanying SYN packet storms. 
Alternatively, setting - this too high can result in increased latency, due to backend - servers that are no longer available, but haven't yet been detected - as such. \n An empty or zero healthCheckInterval means no opinion - and IngressController chooses a default, which is subject to - change over time. Currently the default healthCheckInterval - value is 5s. \n Currently the minimum allowed value is 1s and - the maximum allowed value is 2147483647ms (24.85 days). Both - are subject to change over time." - pattern: ^0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+$ - type: string - maxConnections: - description: "maxConnections defines the maximum number of simultaneous - connections that can be established per HAProxy process. Increasing - this value allows each ingress controller pod to handle more - connections but at the cost of additional system resources being - consumed. \n Permitted values are: empty, 0, -1, and the range - 2000-2000000. \n If this field is empty or 0, the IngressController - will use the default value of 20000, but the default is subject - to change in future releases. \n If the value is -1 then HAProxy - will dynamically compute a maximum value based on the available - ulimits in the running container. Selecting -1 (i.e., auto) - will result in a large value being computed (~520000 on OpenShift - >=4.10 clusters) and therefore each HAProxy process will incur - significant memory usage compared to the current default of - 20000. \n Setting a value that is greater than the current operating - system limit will prevent the HAProxy process from starting. - \n If you choose a discrete value (e.g., 750000) and the router - pod is migrated to a new node, there's no guarantee that that - new node has identical ulimits configured. In such a scenario - the pod would fail to start. If you have nodes with different - ulimits configured (e.g., different tuned profiles) and you - choose a discrete value then the guidance is to use -1 and let - the value be computed dynamically at runtime. \n You can monitor - memory usage for router containers with the following metric: - 'container_memory_working_set_bytes{container=\"router\",namespace=\"openshift-ingress\"}'. - \n You can monitor memory usage of individual HAProxy processes - in router containers with the following metric: 'container_memory_working_set_bytes{container=\"router\",namespace=\"openshift-ingress\"}/container_processes{container=\"router\",namespace=\"openshift-ingress\"}'." - format: int32 - type: integer - serverFinTimeout: - description: "serverFinTimeout defines how long a connection will - be held open while waiting for the server/backend response to - the client closing the connection. \n If unset, the default - timeout is 1s" - format: duration - type: string - serverTimeout: - description: "serverTimeout defines how long a connection will - be held open while waiting for a server/backend response. \n - If unset, the default timeout is 30s" - format: duration - type: string - threadCount: - description: "threadCount defines the number of threads created - per HAProxy process. Creating more threads allows each ingress - controller pod to handle more connections, at the cost of more - system resources being used. HAProxy currently supports up to - 64 threads. If this field is empty, the IngressController will - use the default value. The current default is 4 threads, but - this may change in future releases. \n Setting this field is - generally not recommended. 
Increasing the number of HAProxy - threads allows ingress controller pods to utilize more CPU time - under load, potentially starving other pods if set too high. - Reducing the number of threads may cause the ingress controller - to perform poorly." - format: int32 - maximum: 64 - minimum: 1 - type: integer - tlsInspectDelay: - description: "tlsInspectDelay defines how long the router can - hold data to find a matching route. \n Setting this too short - can cause the router to fall back to the default certificate - for edge-terminated or reencrypt routes even when a better matching - certificate could be used. \n If unset, the default inspect - delay is 5s" - format: duration - type: string - tunnelTimeout: - description: "tunnelTimeout defines how long a tunnel connection - (including websockets) will be held open while the tunnel is - idle. \n If unset, the default timeout is 1h" - format: duration - type: string - type: object - unsupportedConfigOverrides: - description: unsupportedConfigOverrides allows specifying unsupported - configuration options. Its use is unsupported. - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - status: - description: status is the most recently observed status of the IngressController. - properties: - availableReplicas: - description: availableReplicas is number of observed available replicas - according to the ingress controller deployment. - format: int32 - type: integer - conditions: - description: "conditions is a list of conditions and their status. - \n Available means the ingress controller deployment is available - and servicing route and ingress resources (i.e, .status.availableReplicas - equals .spec.replicas) \n There are additional conditions which - indicate the status of other ingress controller features and capabilities. - \n * LoadBalancerManaged - True if the following conditions - are met: * The endpoint publishing strategy requires a service - load balancer. - False if any of those conditions are unsatisfied. - \n * LoadBalancerReady - True if the following conditions are - met: * A load balancer is managed. * The load balancer is - ready. - False if any of those conditions are unsatisfied. \n - \ * DNSManaged - True if the following conditions are met: * - The endpoint publishing strategy and platform support DNS. * - The ingress controller domain is set. * dns.config.openshift.io/cluster - configures DNS zones. - False if any of those conditions are unsatisfied. - \n * DNSReady - True if the following conditions are met: * - DNS is managed. * DNS records have been successfully created. - \ - False if any of those conditions are unsatisfied." - items: - description: OperatorCondition is just the standard condition fields. - properties: - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - type: object - type: array - domain: - description: domain is the actual domain in use. - type: string - endpointPublishingStrategy: - description: endpointPublishingStrategy is the actual strategy in - use. - properties: - hostNetwork: - description: hostNetwork holds parameters for the HostNetwork - endpoint publishing strategy. Present only if type is HostNetwork. - properties: - httpPort: - default: 80 - description: httpPort is the port on the host which should - be used to listen for HTTP requests. This field should be - set when port 80 is already in use. 
The value should not - coincide with the NodePort range of the cluster. When the - value is 0 or is not specified it defaults to 80. - format: int32 - maximum: 65535 - minimum: 0 - type: integer - httpsPort: - default: 443 - description: httpsPort is the port on the host which should - be used to listen for HTTPS requests. This field should - be set when port 443 is already in use. The value should - not coincide with the NodePort range of the cluster. When - the value is 0 or is not specified it defaults to 443. - format: int32 - maximum: 65535 - minimum: 0 - type: integer - protocol: - description: "protocol specifies whether the IngressController - expects incoming connections to use plain TCP or whether - the IngressController expects PROXY protocol. \n PROXY protocol - can be used with load balancers that support it to communicate - the source addresses of client connections when forwarding - those connections to the IngressController. Using PROXY - protocol enables the IngressController to report those source - addresses instead of reporting the load balancer's address - in HTTP headers and logs. Note that enabling PROXY protocol - on the IngressController will cause connections to fail - if you are not using a load balancer that uses PROXY protocol - to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt - for information about PROXY protocol. \n The following values - are valid for this field: \n * The empty string. * \"TCP\". - * \"PROXY\". \n The empty string specifies the default, - which is TCP without PROXY protocol. Note that the default - is subject to change." - enum: - - "" - - TCP - - PROXY - type: string - statsPort: - default: 1936 - description: statsPort is the port on the host where the stats - from the router are published. The value should not coincide - with the NodePort range of the cluster. If an external load - balancer is configured to forward connections to this IngressController, - the load balancer should use this port for health checks. - The load balancer can send HTTP probes on this port on a - given node, with the path /healthz/ready to determine if - the ingress controller is ready to receive traffic on the - node. For proper operation the load balancer must not forward - traffic to a node until the health check reports ready. - The load balancer should also stop forwarding requests within - a maximum of 45 seconds after /healthz/ready starts reporting - not-ready. Probing every 5 to 10 seconds, with a 5-second - timeout and with a threshold of two successful or failed - requests to become healthy or unhealthy respectively, are - well-tested values. When the value is 0 or is not specified - it defaults to 1936. - format: int32 - maximum: 65535 - minimum: 0 - type: integer - type: object - loadBalancer: - description: loadBalancer holds parameters for the load balancer. - Present only if type is LoadBalancerService. - properties: - providerParameters: - description: "providerParameters holds desired load balancer - information specific to the underlying infrastructure provider. - \n If empty, defaults will be applied. See specific providerParameters - fields for details about their defaults." - properties: - aws: - description: "aws provides configuration settings that - are specific to AWS load balancers. \n If empty, defaults - will be applied. See specific aws fields for details - about their defaults." 
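As an illustration, this stanza has the same shape under both spec and status; selecting an AWS Network Load Balancer would have looked roughly like this sketch (values taken from the enums defined below):

    endpointPublishingStrategy:
      type: LoadBalancerService
      loadBalancer:
        scope: External
        providerParameters:
          type: AWS
          aws:
            type: NLB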
- properties: - classicLoadBalancer: - description: classicLoadBalancerParameters holds configuration - parameters for an AWS classic load balancer. Present - only if type is Classic. - properties: - connectionIdleTimeout: - description: connectionIdleTimeout specifies the - maximum time period that a connection may be - idle before the load balancer closes the connection. The - value must be parseable as a time duration value; - see https://golang.org/pkg/time/#ParseDuration. A - nil or zero value means no opinion, in which - case a default value is used. The default value - for this field is 60s. This default is subject - to change. - format: duration - type: string - type: object - networkLoadBalancer: - description: networkLoadBalancerParameters holds configuration - parameters for an AWS network load balancer. Present - only if type is NLB. - type: object - type: - description: "type is the type of AWS load balancer - to instantiate for an ingresscontroller. \n Valid - values are: \n * \"Classic\": A Classic Load Balancer - that makes routing decisions at either the transport - layer (TCP/SSL) or the application layer (HTTP/HTTPS). - See the following for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb - \n * \"NLB\": A Network Load Balancer that makes - routing decisions at the transport layer (TCP/SSL). - See the following for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb" - enum: - - Classic - - NLB - type: string - required: - - type - type: object - gcp: - description: "gcp provides configuration settings that - are specific to GCP load balancers. \n If empty, defaults - will be applied. See specific gcp fields for details - about their defaults." - properties: - clientAccess: - description: "clientAccess describes how client access - is restricted for internal load balancers. \n Valid - values are: * \"Global\": Specifying an internal - load balancer with Global client access allows - clients from any region within the VPC to communicate - with the load balancer. \n https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#global_access - \n * \"Local\": Specifying an internal load balancer - with Local client access means only clients within - the same region (and VPC) as the GCP load balancer - \ can communicate with the load balancer. Note - that this is the default behavior. \n https://cloud.google.com/load-balancing/docs/internal#client_access" - enum: - - Global - - Local - type: string - type: object - type: - description: type is the underlying infrastructure provider - for the load balancer. Allowed values are "AWS", "Azure", - "BareMetal", "GCP", "Nutanix", "OpenStack", and "VSphere". - enum: - - AWS - - Azure - - BareMetal - - GCP - - Nutanix - - OpenStack - - VSphere - - IBM - type: string - required: - - type - type: object - scope: - description: scope indicates the scope at which the load balancer - is exposed. Possible values are "External" and "Internal". - enum: - - Internal - - External - type: string - required: - - scope - type: object - nodePort: - description: nodePort holds parameters for the NodePortService - endpoint publishing strategy. Present only if type is NodePortService. - properties: - protocol: - description: "protocol specifies whether the IngressController - expects incoming connections to use plain TCP or whether - the IngressController expects PROXY protocol.
\n PROXY protocol - can be used with load balancers that support it to communicate - the source addresses of client connections when forwarding - those connections to the IngressController. Using PROXY - protocol enables the IngressController to report those source - addresses instead of reporting the load balancer's address - in HTTP headers and logs. Note that enabling PROXY protocol - on the IngressController will cause connections to fail - if you are not using a load balancer that uses PROXY protocol - to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt - for information about PROXY protocol. \n The following values - are valid for this field: \n * The empty string. * \"TCP\". - * \"PROXY\". \n The empty string specifies the default, - which is TCP without PROXY protocol. Note that the default - is subject to change." - enum: - - "" - - TCP - - PROXY - type: string - type: object - private: - description: private holds parameters for the Private endpoint - publishing strategy. Present only if type is Private. - type: object - type: - description: "type is the publishing strategy to use. Valid values - are: \n * LoadBalancerService \n Publishes the ingress controller - using a Kubernetes LoadBalancer Service. \n In this configuration, - the ingress controller deployment uses container networking. - A LoadBalancer Service is created to publish the deployment. - \n See: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer - \n If domain is set, a wildcard DNS record will be managed to - point at the LoadBalancer Service's external name. DNS records - are managed only in DNS zones defined by dns.config.openshift.io/cluster - .spec.publicZone and .spec.privateZone. \n Wildcard DNS management - is currently supported only on the AWS, Azure, and GCP platforms. - \n * HostNetwork \n Publishes the ingress controller on node - ports where the ingress controller is deployed. \n In this configuration, - the ingress controller deployment uses host networking, bound - to node ports 80 and 443. The user is responsible for configuring - an external load balancer to publish the ingress controller - via the node ports. \n * Private \n Does not publish the ingress - controller. \n In this configuration, the ingress controller - deployment uses container networking, and is not explicitly - published. The user must manually publish the ingress controller. - \n * NodePortService \n Publishes the ingress controller using - a Kubernetes NodePort Service. \n In this configuration, the - ingress controller deployment uses container networking. A NodePort - Service is created to publish the deployment. The specific node - ports are dynamically allocated by OpenShift; however, to support - static port allocations, user changes to the node port field - of the managed NodePort Service will be preserved." - enum: - - LoadBalancerService - - HostNetwork - - Private - - NodePortService - type: string - required: - - type - type: object - namespaceSelector: - description: namespaceSelector is the actual namespaceSelector in - use. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. - items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. - properties: - key: - description: key is the label key that the selector applies - to.
- type: string - operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. - type: object - type: object - observedGeneration: - description: observedGeneration is the most recent generation observed. - format: int64 - type: integer - routeSelector: - description: routeSelector is the actual routeSelector in use. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. - items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. - properties: - key: - description: key is the label key that the selector applies - to. - type: string - operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. - type: object - type: object - selector: - description: selector is a label selector, in string format, for ingress - controller pods corresponding to the IngressController. The number - of matching pods should equal the value of availableReplicas. - type: string - tlsProfile: - description: tlsProfile is the TLS connection configuration that is - in effect. - properties: - ciphers: - description: "ciphers is used to specify the cipher algorithms - that are negotiated during the TLS handshake. Operators may - remove entries their operands do not support. For example, - to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" - items: - type: string - type: array - minTLSVersion: - description: "minTLSVersion is used to specify the minimal version - of the TLS protocol that is negotiated during the TLS handshake. 
- For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): \n - \ minTLSVersion: TLSv1.1 \n NOTE: currently the highest minTLSVersion - allowed is VersionTLS12" - enum: - - VersionTLS10 - - VersionTLS11 - - VersionTLS12 - - VersionTLS13 - type: string - type: object - type: object - type: object - served: true - storage: true - subresources: - scale: - labelSelectorPath: .status.selector - specReplicasPath: .spec.replicas - statusReplicasPath: .status.availableReplicas - status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml-patch b/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml-patch deleted file mode 100644 index f9336304f..000000000 --- a/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml-patch +++ /dev/null @@ -1,32 +0,0 @@ -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/logging/properties/access/properties/destination/properties/syslog/oneOf - value: - - properties: - address: - format: ipv4 - - properties: - address: - format: ipv6 -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/tuningOptions/anyOf - # We explicitly choose anyOf to allow: - # - # - the enum - # - the range - # - and null - # - # If we specify oneOf that only allows for one of 'the enum' or 'the - # range'. Anything outside of that is invalid. However, we want to - # allow 'null' because maxConnections is an optional field. Using - # anyOf allows for 'the enum', 'the range', or 'null'. By allowing - # 'null' we provide a consistent user experience given that there - # are other optional integer fields in tuningOptions. - value: - - properties: - maxConnections: - enum: [ -1, 0 ] - - properties: - maxConnections: - format: int32 - minimum: 2000 - maximum: 2000000 diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_service-ca-operator_02_crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_service-ca-operator_02_crd.yaml deleted file mode 100644 index 3c7a67d61..000000000 --- a/vendor/github.com/openshift/api/operator/v1/0000_50_service-ca-operator_02_crd.yaml +++ /dev/null @@ -1,135 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/475 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: servicecas.operator.openshift.io -spec: - group: operator.openshift.io - names: - kind: ServiceCA - listKind: ServiceCAList - plural: servicecas - singular: serviceca - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "ServiceCA provides information to configure an operator to manage the service cert controllers \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - type: object - properties: - logLevel: - description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - type: string - default: Normal - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - managementState: - description: managementState indicates whether and how the operator should manage the component - type: string - pattern: ^(Managed|Unmanaged|Force|Removed)$ - observedConfig: - description: observedConfig holds a sparse config that the controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - operatorLogLevel: - description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - type: string - default: Normal - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to contain the fields to override; they will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - status: - description: status holds observed values from the cluster. They may not be overridden. - type: object - properties: - conditions: - description: conditions is a list of conditions and their status - type: array - items: - description: OperatorCondition is just the standard condition fields. - type: object - properties: - lastTransitionTime: - type: string - format: date-time - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - generations: - description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. - type: array - items: - description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made.
- type: object - properties: - group: - description: group is the group of the thing you're tracking - type: string - hash: - description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps - type: string - lastGeneration: - description: lastGeneration is the last generation of the workload controller involved - type: integer - format: int64 - name: - description: name is the name of the thing you're tracking - type: string - namespace: - description: namespace is where the thing you're tracking is - type: string - resource: - description: resource is the resource type of the thing you're tracking - type: string - observedGeneration: - description: observedGeneration is the last generation change you've dealt with - type: integer - format: int64 - readyReplicas: - description: readyReplicas indicates how many replicas are ready and at the desired state - type: integer - format: int32 - version: - description: version is the level this availability applies to - type: string - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01.crd.yaml deleted file mode 100644 index 6ca60be27..000000000 --- a/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01.crd.yaml +++ /dev/null @@ -1,690 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/475 - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: networks.operator.openshift.io -spec: - group: operator.openshift.io - names: - kind: Network - listKind: NetworkList - plural: networks - singular: network - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Network describes the cluster's desired network configuration. - It is consumed by the cluster-network-operator. \n Compatibility level 1: - Stable within a major release for a minimum of 12 months or 3 minor releases - (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: NetworkSpec is the top-level network configuration object. - properties: - additionalNetworks: - description: additionalNetworks is a list of extra networks to make - available to pods when multiple networks are enabled. - items: - description: AdditionalNetworkDefinition configures an extra network - that is available but not created by default. Instead, pods must - request them by name. type must be specified, along with exactly - one "Config" that matches the type. 
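A hedged sketch of an additionalNetworks entry under the deleted schema; "Raw" is assumed to be the string value of the NetworkTypeRaw constant, and the network name, namespace, and CNI config are illustrative:

apiVersion: operator.openshift.io/v1
kind: Network
metadata:
  name: cluster
spec:
  additionalNetworks:
  - name: macvlan-demo   # illustrative; becomes the NetworkAttachmentDefinition name
    namespace: demo      # the default namespace is used when omitted
    type: Raw            # assumed value of NetworkTypeRaw
    rawCNIConfig: '{"cniVersion": "0.3.1", "type": "macvlan", "master": "eth1"}'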
- properties: - name: - description: name is the name of the network. This will be populated - in the resulting CRD. This must be unique. - type: string - namespace: - description: namespace is the namespace of the network. This - will be populated in the resulting CRD. If not given, the network - will be created in the default namespace. - type: string - rawCNIConfig: - description: rawCNIConfig is the raw CNI configuration json - to create in the NetworkAttachmentDefinition CRD - type: string - simpleMacvlanConfig: - description: SimpleMacvlanConfig configures the macvlan interface - in case of type:NetworkTypeSimpleMacvlan - properties: - ipamConfig: - description: IPAMConfig configures the IPAM module to be used - for IP Address Management (IPAM). - properties: - staticIPAMConfig: - description: StaticIPAMConfig configures the static - IP address in case of type:IPAMTypeStatic - properties: - addresses: - description: Addresses configures IP address for - the interface - items: - description: StaticIPAMAddresses provides IP address - and Gateway for static IPAM addresses - properties: - address: - description: Address is the IP address in - CIDR format - type: string - gateway: - description: Gateway is an IP inside the subnet - to designate as the gateway - type: string - type: object - type: array - dns: - description: DNS configures DNS for the interface - properties: - domain: - description: Domain configures the domain name, - the local domain used for short hostname lookups - type: string - nameservers: - description: Nameservers points to DNS servers - for IP lookup - items: - type: string - type: array - search: - description: Search configures priority ordered - search domains for short hostname lookups - items: - type: string - type: array - type: object - routes: - description: Routes configures IP routes for the - interface - items: - description: StaticIPAMRoutes provides Destination/Gateway - pairs for static IPAM routes - properties: - destination: - description: Destination points to the IP route - destination - type: string - gateway: - description: Gateway is the route's next-hop - IP address. If unset, a default gateway is - assumed (as determined by the CNI plugin). - type: string - type: object - type: array - type: object - type: - description: Type is the type of IPAM module to be - used for IP Address Management (IPAM). The supported - values are IPAMTypeDHCP, IPAMTypeStatic - type: string - type: object - master: - description: master is the host interface to create the - macvlan interface from. If not specified, it will be the default - route interface - type: string - mode: - description: 'mode is the macvlan mode: bridge, private, - vepa, passthru. The default is bridge' - type: string - mtu: - description: mtu is the mtu to use for the macvlan interface. - If unset, the host's kernel will select the value. - format: int32 - minimum: 0 - type: integer - type: object - type: - description: type is the type of network. The supported values - are NetworkTypeRaw, NetworkTypeSimpleMacvlan - type: string - type: object - type: array - clusterNetwork: - description: clusterNetwork is the IP address pool to use for pod - IPs. Some network providers, e.g. OpenShift SDN, support multiple - ClusterNetworks. Others only support one. This is equivalent to - the cluster-cidr. - items: - description: ClusterNetworkEntry is a subnet from which to allocate - PodIPs. A network of size HostPrefix (in CIDR notation) will be - allocated when nodes join the cluster.
If the HostPrefix field - is not used by the plugin, it can be left unset. Not all network - providers support multiple ClusterNetworks - properties: - cidr: - type: string - hostPrefix: - format: int32 - minimum: 0 - type: integer - type: object - type: array - defaultNetwork: - description: defaultNetwork is the "default" network that all pods - will receive - properties: - kuryrConfig: - description: KuryrConfig configures the kuryr plugin - properties: - controllerProbesPort: - description: The port kuryr-controller will listen for readiness - and liveness requests. - format: int32 - minimum: 0 - type: integer - daemonProbesPort: - description: The port kuryr-daemon will listen for readiness - and liveness requests. - format: int32 - minimum: 0 - type: integer - enablePortPoolsPrepopulation: - description: enablePortPoolsPrepopulation when true will make - Kuryr prepopulate each newly created port pool with a minimum - number of ports. Kuryr uses Neutron port pooling to fight - the fact that it takes a significant amount of time to create - one. It creates a number of ports when the first pod that - is configured to use the dedicated network for pods is created - in a namespace, and keeps them ready to be attached to pods. - Port prepopulation is disabled by default. - type: boolean - mtu: - description: mtu is the MTU that Kuryr should use when creating - pod networks in Neutron. The value has to be lower or equal - to the MTU of the nodes network and Neutron has to allow - creation of tenant networks with such MTU. If unset, Pod - networks will be created with the same MTU as the nodes - network has. - format: int32 - minimum: 0 - type: integer - openStackServiceNetwork: - description: openStackServiceNetwork contains the CIDR of - network from which to allocate IPs for OpenStack Octavia's - Amphora VMs. Please note that with the Amphora driver, Octavia - uses two IPs from that network for each loadbalancer - one - given by OpenShift and a second for VRRP connections. As the - first one is managed by OpenShift's and the second by Neutron's - IPAMs, those need to come from different pools. Therefore - `openStackServiceNetwork` needs to be at least twice the - size of `serviceNetwork`, and the whole `serviceNetwork` must - be overlapping with `openStackServiceNetwork`. cluster-network-operator - will then make sure VRRP IPs are taken from the ranges inside - `openStackServiceNetwork` that are not overlapping with - `serviceNetwork`, effectively preventing conflicts. If not - set, cluster-network-operator will use `serviceNetwork` expanded - by decrementing the prefix size by 1. - type: string - poolBatchPorts: - description: poolBatchPorts sets a number of ports that should - be created in a single batch request to extend the port - pool. The default is 3. For more information about port - pools see enablePortPoolsPrepopulation setting. - minimum: 0 - type: integer - poolMaxPorts: - description: poolMaxPorts sets a maximum number of free ports - that are being kept in a port pool. If the number of ports - exceeds this setting, free ports will get deleted. Setting - 0 will disable this upper bound, effectively preventing - pools from shrinking and this is the default value. For - more information about port pools see enablePortPoolsPrepopulation - setting. - minimum: 0 - type: integer - poolMinPorts: - description: poolMinPorts sets a minimum number of free ports - that should be kept in a port pool. If the number of ports - is lower than this setting, new ports will get created and - added to the pool.
The default is 1. For more information about - port pools see enablePortPoolsPrepopulation setting. - minimum: 1 - type: integer - type: object - openshiftSDNConfig: - description: openShiftSDNConfig configures the openshift-sdn plugin - properties: - enableUnidling: - description: enableUnidling controls whether or not the service - proxy will support idling and unidling of services. By default, - unidling is enabled. - type: boolean - mode: - description: mode is one of "Multitenant", "Subnet", or "NetworkPolicy" - type: string - mtu: - description: mtu is the mtu to use for the tunnel interface. - Defaults to 1450 if unset. This must be 50 bytes smaller - than the machine's uplink. - format: int32 - minimum: 0 - type: integer - useExternalOpenvswitch: - description: 'useExternalOpenvswitch used to control whether - the operator would deploy an OVS DaemonSet itself or expect - someone else to start OVS. As of 4.6, OVS is always run - as a system service, and this flag is ignored. DEPRECATED: - non-functional as of 4.6' - type: boolean - vxlanPort: - description: vxlanPort is the port to use for all vxlan packets. - The default is 4789. - format: int32 - minimum: 0 - type: integer - type: object - ovnKubernetesConfig: - description: ovnKubernetesConfig configures the ovn-kubernetes - plugin. - properties: - gatewayConfig: - description: gatewayConfig holds the configuration for node - gateway options. - properties: - routingViaHost: - default: false - description: RoutingViaHost allows pod egress traffic - to exit via the ovn-k8s-mp0 management port into the - host before sending it out. If this is not set, traffic - will always egress directly from OVN to outside without - touching the host stack. Setting this to true means - hardware offload will not be supported. Default is false - if GatewayConfig is specified. - type: boolean - type: object - genevePort: - description: genevePort is the UDP port to be used by Geneve - encapsulation. Default is 6081 - format: int32 - minimum: 1 - type: integer - hybridOverlayConfig: - description: HybridOverlayConfig configures an additional - overlay network for peers that are not using OVN. - properties: - hybridClusterNetwork: - description: HybridClusterNetwork defines a network space - given to nodes on an additional overlay network. - items: - description: ClusterNetworkEntry is a subnet from which - to allocate PodIPs. A network of size HostPrefix (in - CIDR notation) will be allocated when nodes join the - cluster. If the HostPrefix field is not used by the - plugin, it can be left unset. Not all network providers - support multiple ClusterNetworks - properties: - cidr: - type: string - hostPrefix: - format: int32 - minimum: 0 - type: integer - type: object - type: array - hybridOverlayVXLANPort: - description: HybridOverlayVXLANPort defines the VXLAN - port number to be used by the additional overlay network. - Default is 4789 - format: int32 - type: integer - type: object - ipsecConfig: - description: ipsecConfig enables and configures IPsec for - pods on the pod network within the cluster. - type: object - mtu: - description: mtu is the MTU to use for the tunnel interface. - This must be 100 bytes smaller than the uplink mtu. Default - is 1400 - format: int32 - minimum: 0 - type: integer - policyAuditConfig: - default: {} - description: policyAuditConfig is the configuration for network - policy audit events. If unset, reported defaults are used.
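Putting the ovn-kubernetes fields above together, a plausible defaultNetwork stanza (values are illustrative; the MTU respects the documented 100-byte offset below a 1500-byte uplink):

spec:
  defaultNetwork:
    type: OVNKubernetes
    ovnKubernetesConfig:
      mtu: 1400          # 100 bytes below the uplink MTU
      genevePort: 6081   # the documented default
      gatewayConfig:
        routingViaHost: true   # note: disables hardware offload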
- properties: - destination: - default: "null" - description: 'destination is the location for policy log - messages. Regardless of this config, persistent logs - will always be dumped to the host at /var/log/ovn/; - additionally, syslog output may be configured as follows. - Valid values are: - "libc" -> to use the libc syslog() - function of the host node''s journald process - "udp:host:port" - -> for sending syslog over UDP - "unix:file" -> for - using the UNIX domain socket directly - "null" -> to - discard all messages logged to syslog. The default is - "null"' - type: string - maxFileSize: - default: 50 - description: maxFileSize is the max size an ACL_audit - log file is allowed to reach before rotation occurs. - Units are in MB and the default is 50MB - format: int32 - minimum: 1 - type: integer - rateLimit: - default: 20 - description: rateLimit is the approximate maximum number - of messages to generate per-second per-node. If unset - the default of 20 msg/sec is used. - format: int32 - minimum: 1 - type: integer - syslogFacility: - default: local0 - description: syslogFacility is the RFC5424 facility for generated - messages, e.g. "kern". Default is "local0" - type: string - type: object - type: object - type: - description: type is the type of network. All NetworkTypes are - supported except for NetworkTypeRaw - type: string - type: object - deployKubeProxy: - description: deployKubeProxy specifies whether or not a standalone - kube-proxy should be deployed by the operator. Some network providers - include kube-proxy or similar functionality. If unset, the plugin - will attempt to select the correct value, which is false when OpenShift - SDN and ovn-kubernetes are used and true otherwise. - type: boolean - disableMultiNetwork: - description: disableMultiNetwork specifies whether or not multiple - pod network support should be disabled. If unset, this property - defaults to 'false' and multiple network support is enabled. - type: boolean - disableNetworkDiagnostics: - default: false - description: disableNetworkDiagnostics specifies whether or not PodNetworkConnectivityCheck - CRs from a test pod to every node, apiserver and LB should be disabled - or not. If unset, this property defaults to 'false' and network - diagnostics is enabled. Setting this to 'true' would reduce the - additional load of the pods performing the checks. - type: boolean - exportNetworkFlows: - description: exportNetworkFlows enables and configures the export - of network flow metadata from the pod network by using protocols - NetFlow, SFlow or IPFIX. Currently only supported on OVN-Kubernetes - plugin. If unset, flows will not be exported to any collector. - properties: - ipfix: - description: ipfix defines IPFIX configuration. - properties: - collectors: - description: ipfixCollectors is a list of strings formatted - as ip:port with a maximum of ten items - items: - pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ - type: string - maxItems: 10 - minItems: 1 - type: array - type: object - netFlow: - description: netFlow defines the NetFlow configuration. - properties: - collectors: - description: netFlow defines the NetFlow collectors that will - consume the flow data exported from OVS.
It is a list of - strings formatted as ip:port with a maximum of ten items - items: - pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ - type: string - maxItems: 10 - minItems: 1 - type: array - type: object - sFlow: - description: sFlow defines the SFlow configuration. - properties: - collectors: - description: sFlowCollectors is list of strings formatted - as ip:port with a maximum of ten items - items: - pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ - type: string - maxItems: 10 - minItems: 1 - type: array - type: object - type: object - kubeProxyConfig: - description: kubeProxyConfig lets us configure desired proxy configuration. - If not specified, sensible defaults will be chosen by OpenShift - directly. Not consumed by all network providers - currently only - openshift-sdn. - properties: - bindAddress: - description: The address to "bind" on Defaults to 0.0.0.0 - type: string - iptablesSyncPeriod: - description: 'An internal kube-proxy parameter. In older releases - of OCP, this sometimes needed to be adjusted in large clusters - for performance reasons, but this is no longer necessary, and - there is no reason to change this from the default value. Default: - 30s' - type: string - proxyArguments: - additionalProperties: - description: ProxyArgumentList is a list of arguments to pass - to the kubeproxy process - items: - type: string - type: array - description: Any additional arguments to pass to the kubeproxy - process - type: object - type: object - logLevel: - default: Normal - description: "logLevel is an intent based logging for an overall component. - \ It does not give fine grained control, but it is a simple way - to manage coarse grained logging choices that operators have to - interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", - \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - type: string - managementState: - description: managementState indicates whether and how the operator - should manage the component - pattern: ^(Managed|Unmanaged|Force|Removed)$ - type: string - migration: - description: migration enables and configures the cluster network - migration. The migration procedure allows to change the network - type and the MTU. - properties: - mtu: - description: mtu contains the MTU migration configuration. Set - this to allow changing the MTU values for the default network. - If unset, the operation of changing the MTU for the default - network will be rejected. - properties: - machine: - description: machine contains MTU migration configuration - for the machine's uplink. Needs to be migrated along with - the default network MTU unless the current uplink MTU already - accommodates the default network MTU. - properties: - from: - description: from is the MTU to migrate from. - format: int32 - minimum: 0 - type: integer - to: - description: to is the MTU to migrate to. - format: int32 - minimum: 0 - type: integer - type: object - network: - description: network contains information about MTU migration - for the default network. Migrations are only allowed to - MTU values lower than the machine's uplink MTU by the minimum - appropriate offset. 
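A sketch of the migration stanza described above, assuming a move to OVN-Kubernetes where the current uplink MTU already accommodates the target value (so the machine block is omitted); the MTU values are illustrative:

spec:
  migration:
    networkType: OVNKubernetes
    mtu:
      network:
        from: 1450   # current default network MTU
        to: 1400     # target default network MTU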
- properties: - from: - description: from is the MTU to migrate from. - format: int32 - minimum: 0 - type: integer - to: - description: to is the MTU to migrate to. - format: int32 - minimum: 0 - type: integer - type: object - type: object - networkType: - description: networkType is the target type of network migration. - Set this to the target network type to allow changing the default - network. If unset, the operation of changing cluster default - network plugin will be rejected. The supported values are OpenShiftSDN, - OVNKubernetes - type: string - type: object - observedConfig: - description: observedConfig holds a sparse config that controller - has observed from the cluster state. It exists in spec because - it is an input to the level for the operator - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - operatorLogLevel: - default: Normal - description: "operatorLogLevel is an intent based logging for the - operator itself. It does not give fine grained control, but it - is a simple way to manage coarse grained logging choices that operators - have to interpret for themselves. \n Valid values are: \"Normal\", - \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - type: string - serviceNetwork: - description: serviceNetwork is the ip address pool to use for Service - IPs Currently, all existing network providers only support a single - value here, but this is an array to allow for growth. - items: - type: string - type: array - unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that - will override any previously set options. It only needs to be the - fields to override it will end up overlaying in the following order: - 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - useMultiNetworkPolicy: - description: useMultiNetworkPolicy enables a controller which allows - for MultiNetworkPolicy objects to be used on additional networks - as created by Multus CNI. MultiNetworkPolicy are similar to NetworkPolicy - objects, but NetworkPolicy objects only apply to the primary interface. - With MultiNetworkPolicy, you can control the traffic that a pod - can receive over the secondary interfaces. If unset, this property - defaults to 'false' and MultiNetworkPolicy objects are ignored. - If 'disableMultiNetwork' is 'true' then the value of this field - is ignored. - type: boolean - type: object - status: - description: NetworkStatus is detailed operator status, which is distilled - up to the Network clusteroperator object. - properties: - conditions: - description: conditions is a list of conditions and their status - items: - description: OperatorCondition is just the standard condition fields. - properties: - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - type: object - type: array - generations: - description: generations are used to determine when an item needs - to be reconciled or has changed in a way that needs a reaction. - items: - description: GenerationStatus keeps track of the generation for - a given resource so that decisions about forced updates can be - made. 
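The multi-network toggles above combine roughly like this (a sketch; the serviceNetwork value is illustrative):

spec:
  disableMultiNetwork: false   # keep multiple pod network support enabled
  useMultiNetworkPolicy: true  # honor MultiNetworkPolicy on secondary interfaces
  serviceNetwork:
  - 172.30.0.0/16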
- properties: - group: - description: group is the group of the thing you're tracking - type: string - hash: - description: hash is an optional field set for resources without - generation that are content sensitive like secrets and configmaps - type: string - lastGeneration: - description: lastGeneration is the last generation of the workload - controller involved - format: int64 - type: integer - name: - description: name is the name of the thing you're tracking - type: string - namespace: - description: namespace is where the thing you're tracking is - type: string - resource: - description: resource is the resource type of the thing you're - tracking - type: string - type: object - type: array - observedGeneration: - description: observedGeneration is the last generation change you've - dealt with - format: int64 - type: integer - readyReplicas: - description: readyReplicas indicates how many replicas are ready and - at the desired state - format: int32 - type: integer - version: - description: version is the level this availability applies to - type: string - type: object - type: object - served: true - storage: true diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01.crd.yaml-patch b/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01.crd.yaml-patch deleted file mode 100644 index 094f526ec..000000000 --- a/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01.crd.yaml-patch +++ /dev/null @@ -1,3 +0,0 @@ -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/defaultNetwork/properties/ovnKubernetesConfig/properties/policyAuditConfig/default - value: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_console-operator.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_70_console-operator.crd.yaml deleted file mode 100644 index 7a7492d6e..000000000 --- a/vendor/github.com/openshift/api/operator/v1/0000_70_console-operator.crd.yaml +++ /dev/null @@ -1,275 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/486 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: consoles.operator.openshift.io -spec: - group: operator.openshift.io - names: - kind: Console - listKind: ConsoleList - plural: consoles - singular: console - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Console provides a means to configure an operator to manage the console. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ConsoleSpec is the specification of the desired behavior of the Console. - type: object - properties: - customization: - description: customization is used to optionally provide a small set of customization options to the web console. - type: object - properties: - addPage: - description: addPage allows customizing actions on the Add page in developer perspective. - type: object - properties: - disabledActions: - description: disabledActions is a list of actions that are not shown to users. Each action in the list is represented by its ID. - type: array - minItems: 1 - items: - type: string - brand: - description: brand is the default branding of the web console which can be overridden by providing the brand field. There is a limited set of specific brand options. This field controls elements of the console such as the logo. Invalid value will prevent a console rollout. - type: string - pattern: ^$|^(ocp|origin|okd|dedicated|online|azure)$ - customLogoFile: - description: 'customLogoFile replaces the default OpenShift logo in the masthead and about dialog. It is a reference to a ConfigMap in the openshift-config namespace. This can be created with a command like ''oc create configmap custom-logo --from-file=/path/to/file -n openshift-config''. Image size must be less than 1 MB due to constraints on the ConfigMap size. The ConfigMap key should include a file extension so that the console serves the file with the correct MIME type. Recommended logo specifications: Dimensions: Max height of 68px and max width of 200px SVG format preferred' - type: object - properties: - key: - description: Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references. - type: string - name: - type: string - customProductName: - description: customProductName is the name that will be displayed in page titles, logo alt text, and the about dialog instead of the normal OpenShift product name. - type: string - developerCatalog: - description: developerCatalog allows to configure the shown developer catalog categories. - type: object - properties: - categories: - description: categories which are shown in the developer catalog. - type: array - items: - description: DeveloperConsoleCatalogCategory for the developer console catalog. - type: object - required: - - id - - label - properties: - id: - description: ID is an identifier used in the URL to enable deep linking in console. ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters. - type: string - maxLength: 32 - minLength: 1 - pattern: ^[A-Za-z0-9-_]+$ - label: - description: label defines a category display label. It is required and must have 1-64 characters. - type: string - maxLength: 64 - minLength: 1 - subcategories: - description: subcategories defines a list of child categories. - type: array - items: - description: DeveloperConsoleCatalogCategoryMeta are the key identifiers of a developer catalog category. - type: object - required: - - id - - label - properties: - id: - description: ID is an identifier used in the URL to enable deep linking in console. ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters. - type: string - maxLength: 32 - minLength: 1 - pattern: ^[A-Za-z0-9-_]+$ - label: - description: label defines a category display label. It is required and must have 1-64 characters. 
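A minimal Console customization consistent with the deleted schema; the category id, label, and tags are illustrative:

apiVersion: operator.openshift.io/v1
kind: Console
metadata:
  name: cluster   # assumed singleton name
spec:
  customization:
    brand: okd    # must match ^$|^(ocp|origin|okd|dedicated|online|azure)$
    developerCatalog:
      categories:
      - id: languages   # 1-32 URL-safe characters
        label: Languages
        tags: [go, java]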
- type: string - maxLength: 64 - minLength: 1 - tags: - description: tags is a list of strings that will match the category. A selected category shows all items which have at least one overlapping tag between category and item. - type: array - items: - type: string - tags: - description: tags is a list of strings that will match the category. A selected category shows all items which have at least one overlapping tag between category and item. - type: array - items: - type: string - documentationBaseURL: - description: documentationBaseURL links to external documentation that is shown in various sections of the web console. Providing documentationBaseURL will override the default documentation URL. Invalid value will prevent a console rollout. - type: string - pattern: ^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))\/$ - projectAccess: - description: projectAccess allows customizing the available list of ClusterRoles in the Developer perspective Project access page which can be used by a project admin to specify roles to other users and restrict access within the project. If set, the list will replace the default ClusterRole options. - type: object - properties: - availableClusterRoles: - description: availableClusterRoles is the list of ClusterRole names that are assignable to users through the project access tab. - type: array - items: - type: string - quickStarts: - description: quickStarts allows customization of available ConsoleQuickStart resources in console. - type: object - properties: - disabled: - description: disabled is a list of ConsoleQuickStart resource names that are not shown to users. - type: array - items: - type: string - logLevel: - description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - type: string - default: Normal - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - managementState: - description: managementState indicates whether and how the operator should manage the component - type: string - pattern: ^(Managed|Unmanaged|Force|Removed)$ - observedConfig: - description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - operatorLogLevel: - description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - type: string - default: Normal - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - plugins: - description: plugins defines a list of enabled console plugin names. - type: array - items: - type: string - providers: - description: providers contains configuration for using specific service providers. - type: object - properties: - statuspage: - description: statuspage contains ID for statuspage.io page that provides status info. - type: object - properties: - pageID: - description: pageID is the unique ID assigned by Statuspage for your page. This must be a public page.
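The remaining customization and provider knobs might be combined as follows (all values illustrative; documentationBaseURL must end with a trailing slash to satisfy the pattern):

spec:
  customization:
    documentationBaseURL: https://docs.example.com/
    projectAccess:
      availableClusterRoles: [admin, edit, view]
    quickStarts:
      disabled: [sample-quickstart]   # illustrative resource name
  providers:
    statuspage:
      pageID: abc123xyz   # illustrative public Statuspage ID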
- type: string - route: - description: route contains hostname and secret reference that contains the serving certificate. If a custom route is specified, a new route will be created with the provided hostname, under which console will be available. If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed. If the custom hostname points to an arbitrary domain, manual DNS configuration steps are necessary. The default console route will be maintained to reserve the default hostname for console if the custom route is removed. If not specified, default route will be used. DEPRECATED - type: object - properties: - hostname: - description: hostname is the desired custom domain under which console will be available. - type: string - secret: - description: 'secret points to secret in the openshift-config namespace that contains custom certificate and key and needs to be created manually by the cluster admin. Referenced Secret is required to contain the following key value pairs: - "tls.crt" - to specify the custom certificate - "tls.key" - to specify the private key of the custom certificate If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed.' - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - status: - description: ConsoleStatus defines the observed status of the Console. - type: object - properties: - conditions: - description: conditions is a list of conditions and their status - type: array - items: - description: OperatorCondition is just the standard condition fields. - type: object - properties: - lastTransitionTime: - type: string - format: date-time - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - generations: - description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. - type: array - items: - description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made.
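For the (deprecated) route field above, a custom-hostname sketch; the hostname and secret name are illustrative, and the secret must live in openshift-config with tls.crt/tls.key keys:

spec:
  route:
    hostname: console.apps.example.com
    secret:
      name: console-custom-tls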
- type: object - properties: - group: - description: group is the group of the thing you're tracking - type: string - hash: - description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps - type: string - lastGeneration: - description: lastGeneration is the last generation of the workload controller involved - type: integer - format: int64 - name: - description: name is the name of the thing you're tracking - type: string - namespace: - description: namespace is where the thing you're tracking is - type: string - resource: - description: resource is the resource type of the thing you're tracking - type: string - observedGeneration: - description: observedGeneration is the last generation change you've dealt with - type: integer - format: int64 - readyReplicas: - description: readyReplicas indicates how many replicas are ready and at the desired state - type: integer - format: int32 - version: - description: version is the level this availability applies to - type: string - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00.crd.yaml deleted file mode 100644 index ccbed9c0f..000000000 --- a/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00.crd.yaml +++ /dev/null @@ -1,493 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/475 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: dnses.operator.openshift.io -spec: - group: operator.openshift.io - names: - kind: DNS - listKind: DNSList - plural: dnses - singular: dns - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "DNS manages the CoreDNS component to provide a name resolution - service for pods and services in the cluster. \n This supports the DNS-based - service discovery specification: https://github.com/kubernetes/dns/blob/master/docs/specification.md - \n More details: https://kubernetes.io/docs/tasks/administer-cluster/coredns - \n Compatibility level 1: Stable within a major release for a minimum of - 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec is the specification of the desired behavior of the - DNS. - properties: - logLevel: - default: Normal - description: 'logLevel describes the desired logging verbosity for - CoreDNS. Any one of the following values may be specified: * Normal - logs errors from upstream resolvers. 
* Debug logs errors, NXDOMAIN - responses, and NODATA responses. * Trace logs errors and all responses. Setting - logLevel: Trace will produce extremely verbose logs. Valid values - are: "Normal", "Debug", "Trace". Defaults to "Normal".' - enum: - - Normal - - Debug - - Trace - type: string - managementState: - description: managementState indicates whether the DNS operator should - manage cluster DNS - pattern: ^(Managed|Unmanaged|Force|Removed)$ - type: string - nodePlacement: - description: "nodePlacement provides explicit control over the scheduling - of DNS pods. \n Generally, it is useful to run a DNS pod on every - node so that DNS queries are always handled by a local DNS pod instead - of going over the network to a DNS pod on another node. However, - security policies may require restricting the placement of DNS pods - to specific nodes. For example, if a security policy prohibits pods - on arbitrary nodes from communicating with the API, a node selector - can be specified to restrict DNS pods to nodes that are permitted - to communicate with the API. Conversely, if running DNS pods on - nodes with a particular taint is desired, a toleration can be specified - for that taint. \n If unset, defaults are used. See nodePlacement - for more details." - properties: - nodeSelector: - additionalProperties: - type: string - description: "nodeSelector is the node selector applied to DNS - pods. \n If empty, the default is used, which is currently the - following: \n kubernetes.io/os: linux \n This default is subject - to change. \n If set, the specified selector is used and replaces - the default." - type: object - tolerations: - description: "tolerations is a list of tolerations applied to - DNS pods. \n If empty, the DNS operator sets a toleration for - the \"node-role.kubernetes.io/master\" taint. This default - is subject to change. Specifying tolerations without including - a toleration for the \"node-role.kubernetes.io/master\" taint - may be risky as it could lead to an outage if all worker nodes - become unavailable. \n Note that the daemon controller adds - some tolerations as well. See https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/" - items: - description: The pod this Toleration is attached to tolerates - any taint that matches the triple <key,value,effect> using - the matching operator <operator>. - properties: - effect: - description: Effect indicates the taint effect to match. - Empty means match all taint effects. When specified, allowed - values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies - to. Empty means match all taint keys. If the key is empty, - operator must be Exists; this combination means to match - all values and all keys. - type: string - operator: - description: Operator represents a key's relationship to - the value. Valid operators are Exists and Equal. Defaults - to Equal. Exists is equivalent to wildcard for value, - so that a pod can tolerate all taints of a particular - category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of - time the toleration (which must be of effect NoExecute, - otherwise this field is ignored) tolerates the taint. - By default, it is not set, which means tolerate the taint - forever (do not evict). Zero and negative values will - be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches - to.
If the operator is Exists, the value should be empty, - otherwise just a regular string. - type: string - type: object - type: array - type: object - operatorLogLevel: - default: Normal - description: 'operatorLogLevel controls the logging level of the DNS - Operator. Valid values are: "Normal", "Debug", "Trace". Defaults - to "Normal". setting operatorLogLevel: Trace will produce extremely - verbose logs.' - enum: - - Normal - - Debug - - Trace - type: string - servers: - description: "servers is a list of DNS resolvers that provide name - query delegation for one or more subdomains outside the scope of - the cluster domain. If servers consists of more than one Server, - longest suffix match will be used to determine the Server. \n For - example, if there are two Servers, one for \"foo.com\" and another - for \"a.foo.com\", and the name query is for \"www.a.foo.com\", - it will be routed to the Server with Zone \"a.foo.com\". \n If this - field is nil, no servers are created." - items: - description: Server defines the schema for a server that runs per - instance of CoreDNS. - properties: - forwardPlugin: - description: forwardPlugin defines a schema for configuring - CoreDNS to proxy DNS messages to upstream resolvers. - properties: - policy: - default: Random - description: "policy is used to determine the order in which - upstream servers are selected for querying. Any one of - the following values may be specified: \n * \"Random\" - picks a random upstream server for each query. * \"RoundRobin\" - picks upstream servers in a round-robin order, moving - to the next server for each new query. * \"Sequential\" - tries querying upstream servers in a sequential order - until one responds, starting with the first server for - each new query. \n The default value is \"Random\"" - enum: - - Random - - RoundRobin - - Sequential - type: string - transportConfig: - description: "transportConfig is used to configure the transport - type, server name, and optional custom CA or CA bundle - to use when forwarding DNS requests to an upstream resolver. - \n The default value is \"\" (empty) which results in - a standard cleartext connection being used when forwarding - DNS requests to an upstream resolver." - properties: - tls: - description: tls contains the additional configuration - options to use when Transport is set to "TLS". - properties: - caBundle: - description: "caBundle references a ConfigMap that - must contain either a single CA Certificate or - a CA Bundle. This allows cluster administrators - to provide their own CA or CA bundle for validating - the certificate of upstream resolvers. \n 1. The - configmap must contain a `ca-bundle.crt` key. - 2. The value must be a PEM encoded CA certificate - or CA bundle. 3. The administrator must create - this configmap in the openshift-config namespace. - 4. The upstream server certificate must contain - a Subject Alternative Name (SAN) that matches - ServerName." - properties: - name: - description: name is the metadata.name of the - referenced config map - type: string - required: - - name - type: object - serverName: - description: serverName is the upstream server to - connect to when forwarding DNS queries. This is - required when Transport is set to "TLS". ServerName - will be validated against the DNS naming conventions - in RFC 1123 and should match the TLS certificate - installed in the upstream resolver(s). 
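Tying the servers/forwardPlugin fields together, a hedged example of zone delegation over DNS-over-TLS (resolver addresses, zone, serverName, and ConfigMap name are illustrative; the DNS singleton is assumed to be named "default"):

apiVersion: operator.openshift.io/v1
kind: DNS
metadata:
  name: default   # assumed singleton name
spec:
  servers:
  - name: corp-dns
    zones:
    - example.corp
    forwardPlugin:
      policy: RoundRobin
      upstreams:
      - "10.0.0.53"
      - "10.0.0.54:5353"   # IP:port when the resolver is not listening on 53
      transportConfig:
        transport: TLS
        tls:
          serverName: dns.example.corp   # must match the upstream TLS certificate
          caBundle:
            name: corp-ca-bundle         # ConfigMap with a ca-bundle.crt key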
- maxLength: 253 - pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ - type: string - required: - - serverName - type: object - transport: - description: "transport allows cluster administrators - to opt-in to using a DNS-over-TLS connection between - cluster DNS and an upstream resolver(s). Configuring - TLS as the transport at this level without configuring - a CABundle will result in the system certificates - being used to verify the serving certificate of the - upstream resolver(s). \n Possible values: \"\" (empty) - - This means no explicit choice has been made and - the platform chooses the default which is subject - to change over time. The current default is \"Cleartext\". - \"Cleartext\" - Cluster admin specified cleartext - option. This results in the same functionality as - an empty value but may be useful when a cluster admin - wants to be more explicit about the transport, or - wants to switch from \"TLS\" to \"Cleartext\" explicitly. - \"TLS\" - This indicates that DNS queries should be - sent over a TLS connection. If Transport is set to - TLS, you MUST also set ServerName. If a port is not - included with the upstream IP, port 853 will be tried - by default per RFC 7858 section 3.1; https://datatracker.ietf.org/doc/html/rfc7858#section-3.1." - enum: - - TLS - - Cleartext - - "" - type: string - type: object - upstreams: - description: "upstreams is a list of resolvers to forward - name queries for subdomains of Zones. Each instance of - CoreDNS performs health checking of Upstreams. When a - healthy upstream returns an error during the exchange, - another resolver is tried from Upstreams. The Upstreams - are selected in the order specified in Policy. Each upstream - is represented by an IP address or IP:port if the upstream - listens on a port other than 53. \n A maximum of 15 upstreams - is allowed per ForwardPlugin." - items: - type: string - maxItems: 15 - type: array - type: object - name: - description: name is required and specifies a unique name for - the server. Name must comply with the Service Name Syntax - of rfc6335. - type: string - zones: - description: zones is required and specifies the subdomains - that Server is authoritative for. Zones must conform to the - rfc1123 definition of a subdomain. Specifying the cluster - domain (i.e., "cluster.local") is invalid. - items: - type: string - type: array - type: object - type: array - upstreamResolvers: - default: {} - description: "upstreamResolvers defines a schema for configuring CoreDNS - to proxy DNS messages to upstream resolvers for the case of the - default (\".\") server \n If this field is not specified, the upstream - used will default to /etc/resolv.conf, with policy \"sequential\"" - properties: - policy: - default: Sequential - description: "Policy is used to determine the order in which upstream - servers are selected for querying. Any one of the following - values may be specified: \n * \"Random\" picks a random upstream - server for each query. * \"RoundRobin\" picks upstream servers - in a round-robin order, moving to the next server for each new - query. * \"Sequential\" tries querying upstream servers in a - sequential order until one responds, starting with the first - server for each new query. 
\n The default value is \"Sequential\"" - enum: - - Random - - RoundRobin - - Sequential - type: string - transportConfig: - description: "transportConfig is used to configure the transport - type, server name, and optional custom CA or CA bundle to use - when forwarding DNS requests to an upstream resolver. \n The - default value is \"\" (empty) which results in a standard cleartext - connection being used when forwarding DNS requests to an upstream - resolver." - properties: - tls: - description: tls contains the additional configuration options - to use when Transport is set to "TLS". - properties: - caBundle: - description: "caBundle references a ConfigMap that must - contain either a single CA Certificate or a CA Bundle. - This allows cluster administrators to provide their - own CA or CA bundle for validating the certificate of - upstream resolvers. \n 1. The configmap must contain - a `ca-bundle.crt` key. 2. The value must be a PEM encoded - CA certificate or CA bundle. 3. The administrator must - create this configmap in the openshift-config namespace. - 4. The upstream server certificate must contain a Subject - Alternative Name (SAN) that matches ServerName." - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - serverName: - description: serverName is the upstream server to connect - to when forwarding DNS queries. This is required when - Transport is set to "TLS". ServerName will be validated - against the DNS naming conventions in RFC 1123 and should - match the TLS certificate installed in the upstream - resolver(s). - maxLength: 253 - pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ - type: string - required: - - serverName - type: object - transport: - description: "transport allows cluster administrators to opt-in - to using a DNS-over-TLS connection between cluster DNS and - an upstream resolver(s). Configuring TLS as the transport - at this level without configuring a CABundle will result - in the system certificates being used to verify the serving - certificate of the upstream resolver(s). \n Possible values: - \"\" (empty) - This means no explicit choice has been made - and the platform chooses the default which is subject to - change over time. The current default is \"Cleartext\". - \"Cleartext\" - Cluster admin specified cleartext option. - This results in the same functionality as an empty value - but may be useful when a cluster admin wants to be more - explicit about the transport, or wants to switch from \"TLS\" - to \"Cleartext\" explicitly. \"TLS\" - This indicates that - DNS queries should be sent over a TLS connection. If Transport - is set to TLS, you MUST also set ServerName. If a port is - not included with the upstream IP, port 853 will be tried - by default per RFC 7858 section 3.1; https://datatracker.ietf.org/doc/html/rfc7858#section-3.1." - enum: - - TLS - - Cleartext - - "" - type: string - type: object - upstreams: - default: - - type: SystemResolvConf - description: "Upstreams is a list of resolvers to forward name - queries for the \".\" domain. Each instance of CoreDNS performs - health checking of Upstreams. When a healthy upstream returns - an error during the exchange, another resolver is tried from - Upstreams. The Upstreams are selected in the order specified - in Policy. \n A maximum of 15 upstreams is allowed per ForwardPlugin. 
- If no Upstreams are specified, /etc/resolv.conf is used by default" - items: - anyOf: - - not: - required: - - address - - port - properties: - type: - enum: - - "" - - SystemResolvConf - - optional: - - port - properties: - type: - enum: - - Network - required: - - address - description: "Upstream can either be of type SystemResolvConf, - or of type Network. \n * For an Upstream of type SystemResolvConf, - no further fields are necessary: The upstream will be configured - to use /etc/resolv.conf. * For an Upstream of type Network, - a NetworkResolver field needs to be defined with an IP address - or IP:port if the upstream listens on a port other than 53." - properties: - address: - anyOf: - - format: ipv4 - - format: ipv6 - description: Address must be defined when Type is set to - Network. It will be ignored otherwise. It must be a valid - ipv4 or ipv6 address. - type: string - port: - default: 53 - description: Port may be defined when Type is set to Network. - It will be ignored otherwise. Port must be between 1 and 65535 - format: int32 - maximum: 65535 - minimum: 1 - type: integer - type: - description: "Type defines whether this upstream contains - an IP/IP:port resolver or the local /etc/resolv.conf. - Type accepts 2 possible values: SystemResolvConf or Network. - \n * When SystemResolvConf is used, the Upstream structure - does not require any further fields to be defined: /etc/resolv.conf - will be used * When Network is used, the Upstream structure - must contain at least an Address" - enum: - - SystemResolvConf - - Network - - "" - type: string - required: - - type - type: object - maxItems: 15 - type: array - type: object - type: object - status: - description: status is the most recently observed status of the DNS. - properties: - clusterDomain: - description: "clusterDomain is the local cluster DNS domain suffix - for DNS services. This will be a subdomain as defined in RFC 1034, - section 3.5: https://tools.ietf.org/html/rfc1034#section-3.5 Example: - \"cluster.local\" \n More info: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service" - type: string - clusterIP: - description: "clusterIP is the service IP through which this DNS is - made available. \n In the case of the default DNS, this will be - a well known IP that is used as the default nameserver for pods - that are using the default ClusterFirst DNS policy. \n In general, - this IP can be specified in a pod's spec.dnsConfig.nameservers list - or used explicitly when performing name resolution from within the - cluster. Example: dig foo.com @<cluster IP> \n More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies" - type: string - conditions: - description: "conditions provide information about the state of the - DNS on the cluster. \n These are the supported DNS conditions: \n - \ * Available - True if the following conditions are met: * - DNS controller daemonset is available. - False if any of those - conditions are unsatisfied." - items: - description: OperatorCondition is just the standard condition fields.
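The two upstream variants above look like this in practice (address and port are illustrative):

spec:
  upstreamResolvers:
    policy: Sequential
    upstreams:
    - type: Network
      address: 10.0.0.1   # required for type Network
      port: 5353          # optional; defaults to 53
    - type: SystemResolvConf   # no address/port allowed; uses /etc/resolv.conf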
- properties: - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - type: object - type: array - required: - - clusterDomain - - clusterIP - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00.crd.yaml-patch b/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00.crd.yaml-patch deleted file mode 100644 index 285a52672..000000000 --- a/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00.crd.yaml-patch +++ /dev/null @@ -1,21 +0,0 @@ -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/upstreamResolvers/default - value: {} -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/upstreamResolvers/properties/upstreams/items/properties/address/anyOf - value: - - format: ipv4 - - format: ipv6 -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/upstreamResolvers/properties/upstreams/items/anyOf - value: - - properties: - type: - enum: [ "","SystemResolvConf" ] # empty string because client-side validation will not see the default value - not: - required: [ "address", "port" ] - - properties: - type: - enum: [ "Network" ] - required: [ "address" ] - optional: [ "port" ] diff --git a/vendor/github.com/openshift/api/operator/v1/0000_80_csi_snapshot_controller_operator_01_crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_80_csi_snapshot_controller_operator_01_crd.yaml deleted file mode 100644 index f59319a60..000000000 --- a/vendor/github.com/openshift/api/operator/v1/0000_80_csi_snapshot_controller_operator_01_crd.yaml +++ /dev/null @@ -1,134 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/562 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: csisnapshotcontrollers.operator.openshift.io -spec: - group: operator.openshift.io - names: - kind: CSISnapshotController - plural: csisnapshotcontrollers - singular: csisnapshotcontroller - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "CSISnapshotController provides a means to configure an operator to manage the CSI snapshots. `cluster` is the canonical name. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - type: object - properties: - logLevel: - description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - type: string - default: Normal - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - managementState: - description: managementState indicates whether and how the operator should manage the component - type: string - pattern: ^(Managed|Unmanaged|Force|Removed)$ - observedConfig: - description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - operatorLogLevel: - description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - type: string - default: Normal - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - status: - description: status holds observed values from the cluster. They may not be overridden. - type: object - properties: - conditions: - description: conditions is a list of conditions and their status - type: array - items: - description: OperatorCondition is just the standard condition fields. - type: object - properties: - lastTransitionTime: - type: string - format: date-time - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - generations: - description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. - type: array - items: - description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
- type: object - properties: - group: - description: group is the group of the thing you're tracking - type: string - hash: - description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps - type: string - lastGeneration: - description: lastGeneration is the last generation of the workload controller involved - type: integer - format: int64 - name: - description: name is the name of the thing you're tracking - type: string - namespace: - description: namespace is where the thing you're tracking is - type: string - resource: - description: resource is the resource type of the thing you're tracking - type: string - observedGeneration: - description: observedGeneration is the last generation change you've dealt with - type: integer - format: int64 - readyReplicas: - description: readyReplicas indicates how many replicas are ready and at the desired state - type: integer - format: int32 - version: - description: version is the level this availability applies to - type: string - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml deleted file mode 100644 index 6a7f81a43..000000000 --- a/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml +++ /dev/null @@ -1,183 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/701 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: clustercsidrivers.operator.openshift.io -spec: - group: operator.openshift.io - names: - kind: ClusterCSIDriver - plural: clustercsidrivers - singular: clustercsidriver - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "ClusterCSIDriver object allows management and configuration - of a CSI driver operator installed by default in OpenShift. Name of the - object must be name of the CSI driver it operates. See CSIDriverName type - for list of allowed values. \n Compatibility level 1: Stable within a major - release for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - properties: - name: - enum: - - ebs.csi.aws.com - - efs.csi.aws.com - - disk.csi.azure.com - - file.csi.azure.com - - pd.csi.storage.gke.io - - cinder.csi.openstack.org - - csi.vsphere.vmware.com - - manila.csi.openstack.org - - csi.ovirt.org - - csi.kubevirt.io - - csi.sharedresource.openshift.io - - diskplugin.csi.alibabacloud.com - - vpc.block.csi.ibm.io - - powervs.csi.ibm.com - type: string - type: object - spec: - description: spec holds user settable values for configuration - properties: - logLevel: - default: Normal - description: "logLevel is an intent based logging for an overall component. - \ It does not give fine grained control, but it is a simple way - to manage coarse grained logging choices that operators have to - interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", - \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - type: string - managementState: - description: managementState indicates whether and how the operator - should manage the component - pattern: ^(Managed|Unmanaged|Force|Removed)$ - type: string - observedConfig: - description: observedConfig holds a sparse config that controller - has observed from the cluster state. It exists in spec because - it is an input to the level for the operator - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - operatorLogLevel: - default: Normal - description: "operatorLogLevel is an intent based logging for the - operator itself. It does not give fine grained control, but it - is a simple way to manage coarse grained logging choices that operators - have to interpret for themselves. \n Valid values are: \"Normal\", - \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - type: string - unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that - will override any previously set options. It only needs to be the - fields to override it will end up overlaying in the following order: - 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - conditions: - description: conditions is a list of conditions and their status - items: - description: OperatorCondition is just the standard condition fields. - properties: - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - type: object - type: array - generations: - description: generations are used to determine when an item needs - to be reconciled or has changed in a way that needs a reaction. - items: - description: GenerationStatus keeps track of the generation for - a given resource so that decisions about forced updates can be - made. 
- properties: - group: - description: group is the group of the thing you're tracking - type: string - hash: - description: hash is an optional field set for resources without - generation that are content sensitive like secrets and configmaps - type: string - lastGeneration: - description: lastGeneration is the last generation of the workload - controller involved - format: int64 - type: integer - name: - description: name is the name of the thing you're tracking - type: string - namespace: - description: namespace is where the thing you're tracking is - type: string - resource: - description: resource is the resource type of the thing you're - tracking - type: string - type: object - type: array - observedGeneration: - description: observedGeneration is the last generation change you've - dealt with - format: int64 - type: integer - readyReplicas: - description: readyReplicas indicates how many replicas are ready and - at the desired state - format: int32 - type: integer - version: - description: version is the level this availability applies to - type: string - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml-patch b/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml-patch deleted file mode 100644 index 2d880c716..000000000 --- a/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml-patch +++ /dev/null @@ -1,20 +0,0 @@ -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/metadata/properties - value: - name: - type: string - enum: - - ebs.csi.aws.com - - efs.csi.aws.com - - disk.csi.azure.com - - file.csi.azure.com - - pd.csi.storage.gke.io - - cinder.csi.openstack.org - - csi.vsphere.vmware.com - - manila.csi.openstack.org - - csi.ovirt.org - - csi.kubevirt.io - - csi.sharedresource.openshift.io - - diskplugin.csi.alibabacloud.com - - vpc.block.csi.ibm.io - - powervs.csi.ibm.com diff --git a/vendor/github.com/openshift/api/operator/v1/doc.go b/vendor/github.com/openshift/api/operator/v1/doc.go deleted file mode 100644 index 3de961a7f..000000000 --- a/vendor/github.com/openshift/api/operator/v1/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// +k8s:deepcopy-gen=package,register -// +k8s:defaulter-gen=TypeMeta -// +k8s:openapi-gen=true - -// +kubebuilder:validation:Optional -// +groupName=operator.openshift.io -package v1 diff --git a/vendor/github.com/openshift/api/operator/v1/register.go b/vendor/github.com/openshift/api/operator/v1/register.go deleted file mode 100644 index 71727a824..000000000 --- a/vendor/github.com/openshift/api/operator/v1/register.go +++ /dev/null @@ -1,76 +0,0 @@ -package v1 - -import ( - configv1 "github.com/openshift/api/config/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -var ( - GroupName = "operator.openshift.io" - GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} - schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, configv1.Install) - // Install is a function which adds this version to a scheme - Install = schemeBuilder.AddToScheme - - // SchemeGroupVersion generated code relies on this name - // Deprecated - SchemeGroupVersion = GroupVersion - // AddToScheme exists solely to keep the old generators creating valid code - // DEPRECATED - AddToScheme = schemeBuilder.AddToScheme -) - -// Resource 
generated code relies on this being here, but it logically belongs to the group -// DEPRECATED -func Resource(resource string) schema.GroupResource { - return schema.GroupResource{Group: GroupName, Resource: resource} -} - -func addKnownTypes(scheme *runtime.Scheme) error { - metav1.AddToGroupVersion(scheme, GroupVersion) - - scheme.AddKnownTypes(GroupVersion, - &Authentication{}, - &AuthenticationList{}, - &DNS{}, - &DNSList{}, - &CloudCredential{}, - &CloudCredentialList{}, - &ClusterCSIDriver{}, - &ClusterCSIDriverList{}, - &Console{}, - &ConsoleList{}, - &CSISnapshotController{}, - &CSISnapshotControllerList{}, - &Etcd{}, - &EtcdList{}, - &KubeAPIServer{}, - &KubeAPIServerList{}, - &KubeControllerManager{}, - &KubeControllerManagerList{}, - &KubeScheduler{}, - &KubeSchedulerList{}, - &KubeStorageVersionMigrator{}, - &KubeStorageVersionMigratorList{}, - &Network{}, - &NetworkList{}, - &OpenShiftAPIServer{}, - &OpenShiftAPIServerList{}, - &OpenShiftControllerManager{}, - &OpenShiftControllerManagerList{}, - &ServiceCA{}, - &ServiceCAList{}, - &ServiceCatalogAPIServer{}, - &ServiceCatalogAPIServerList{}, - &ServiceCatalogControllerManager{}, - &ServiceCatalogControllerManagerList{}, - &IngressController{}, - &IngressControllerList{}, - &Storage{}, - &StorageList{}, - ) - - return nil -} diff --git a/vendor/github.com/openshift/api/operator/v1/types.go b/vendor/github.com/openshift/api/operator/v1/types.go deleted file mode 100644 index 5f731593d..000000000 --- a/vendor/github.com/openshift/api/operator/v1/types.go +++ /dev/null @@ -1,234 +0,0 @@ -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// MyOperatorResource is an example operator configuration type -// -// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. -// +openshift:compatibility-gen:internal -type MyOperatorResource struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - - // +kubebuilder:validation:Required - // +required - Spec MyOperatorResourceSpec `json:"spec"` - Status MyOperatorResourceStatus `json:"status"` -} - -type MyOperatorResourceSpec struct { - OperatorSpec `json:",inline"` -} - -type MyOperatorResourceStatus struct { - OperatorStatus `json:",inline"` -} - -// +kubebuilder:validation:Pattern=`^(Managed|Unmanaged|Force|Removed)$` -type ManagementState string - -var ( - // Force means that the operator is actively managing its resources but will not block an upgrade - // if unmet prereqs exist. This state puts the operator at risk for unsuccessful upgrades - Force ManagementState = "Force" - // Managed means that the operator is actively managing its resources and trying to keep the component active. - // It will only upgrade the component if it is safe to do so - Managed ManagementState = "Managed" - // Unmanaged means that the operator will not take any action related to the component - // Some operators might not support this management state as it might damage the cluster and lead to manual recovery. - Unmanaged ManagementState = "Unmanaged" - // Removed means that the operator is actively managing its resources and trying to remove all traces of the component - // Some operators (like kube-apiserver-operator) might not support this management state as removing the API server will - // brick the cluster. 
-	Removed ManagementState = "Removed"
-)
-
-// OperatorSpec contains common fields operators need. It is intended to be anonymously included
-// inside of the Spec struct for your particular operator.
-type OperatorSpec struct {
-	// managementState indicates whether and how the operator should manage the component
-	ManagementState ManagementState `json:"managementState"`
-
-	// logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a
-	// simple way to manage coarse grained logging choices that operators have to interpret for their operands.
-	//
-	// Valid values are: "Normal", "Debug", "Trace", "TraceAll".
-	// Defaults to "Normal".
-	// +optional
-	// +kubebuilder:default=Normal
-	LogLevel LogLevel `json:"logLevel,omitempty"`
-
-	// operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a
-	// simple way to manage coarse grained logging choices that operators have to interpret for themselves.
-	//
-	// Valid values are: "Normal", "Debug", "Trace", "TraceAll".
-	// Defaults to "Normal".
-	// +optional
-	// +kubebuilder:default=Normal
-	OperatorLogLevel LogLevel `json:"operatorLogLevel,omitempty"`
-
-	// unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override;
-	// it will end up overlaying in the following order:
-	// 1. hardcoded defaults
-	// 2. observedConfig
-	// 3. unsupportedConfigOverrides
-	// +optional
-	// +nullable
-	// +kubebuilder:pruning:PreserveUnknownFields
-	UnsupportedConfigOverrides runtime.RawExtension `json:"unsupportedConfigOverrides"`
-
-	// observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because
-	// it is an input to the level for the operator
-	// +optional
-	// +nullable
-	// +kubebuilder:pruning:PreserveUnknownFields
-	ObservedConfig runtime.RawExtension `json:"observedConfig"`
-}
-
-// +kubebuilder:validation:Enum="";Normal;Debug;Trace;TraceAll
-type LogLevel string
-
-var (
-	// Normal is the default. Normal, working log information, everything is fine, but helpful notices for auditing or common operations. In kube, this is probably glog=2.
-	Normal LogLevel = "Normal"
-
-	// Debug is used when something went wrong. Even common operations may be logged, and less helpful but more quantity of notices. In kube, this is probably glog=4.
-	Debug LogLevel = "Debug"
-
-	// Trace is used when something went really badly and even more verbose logs are needed. Logging every function call as part of a common operation, to tracing execution of a query. In kube, this is probably glog=6.
-	Trace LogLevel = "Trace"
-
-	// TraceAll is used when something is broken at the level of API content/decoding. It will dump complete body content. If you turn this on in a production cluster
-	// prepare for serious performance issues and massive amounts of logs. In kube, this is probably glog=8.
- TraceAll LogLevel = "TraceAll" -) - -type OperatorStatus struct { - // observedGeneration is the last generation change you've dealt with - // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty"` - - // conditions is a list of conditions and their status - // +optional - Conditions []OperatorCondition `json:"conditions,omitempty"` - - // version is the level this availability applies to - // +optional - Version string `json:"version,omitempty"` - - // readyReplicas indicates how many replicas are ready and at the desired state - ReadyReplicas int32 `json:"readyReplicas"` - - // generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. - // +optional - Generations []GenerationStatus `json:"generations,omitempty"` -} - -// GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. -type GenerationStatus struct { - // group is the group of the thing you're tracking - Group string `json:"group"` - // resource is the resource type of the thing you're tracking - Resource string `json:"resource"` - // namespace is where the thing you're tracking is - Namespace string `json:"namespace"` - // name is the name of the thing you're tracking - Name string `json:"name"` - // lastGeneration is the last generation of the workload controller involved - LastGeneration int64 `json:"lastGeneration"` - // hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps - Hash string `json:"hash"` -} - -var ( - // Available indicates that the operand is present and accessible in the cluster - OperatorStatusTypeAvailable = "Available" - // Progressing indicates that the operator is trying to transition the operand to a different state - OperatorStatusTypeProgressing = "Progressing" - // Degraded indicates that the operator (not the operand) is unable to fulfill the user intent - OperatorStatusTypeDegraded = "Degraded" - // PrereqsSatisfied indicates that the things this operator depends on are present and at levels compatible with the - // current and desired states. - OperatorStatusTypePrereqsSatisfied = "PrereqsSatisfied" - // Upgradeable indicates that the operator configuration itself (not prereqs) can be auto-upgraded by the CVO - OperatorStatusTypeUpgradeable = "Upgradeable" -) - -// OperatorCondition is just the standard condition fields. -type OperatorCondition struct { - Type string `json:"type"` - Status ConditionStatus `json:"status"` - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` - Reason string `json:"reason,omitempty"` - Message string `json:"message,omitempty"` -} - -type ConditionStatus string - -const ( - ConditionTrue ConditionStatus = "True" - ConditionFalse ConditionStatus = "False" - ConditionUnknown ConditionStatus = "Unknown" -) - -// StaticPodOperatorSpec is spec for controllers that manage static pods. -type StaticPodOperatorSpec struct { - OperatorSpec `json:",inline"` - - // forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. - // This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work - // this time instead of failing again on the same config. 
-	ForceRedeploymentReason string `json:"forceRedeploymentReason"`
-
-	// failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api
-	// -1 = unlimited, 0 or unset = 5 (default)
-	FailedRevisionLimit int32 `json:"failedRevisionLimit,omitempty"`
-	// succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api
-	// -1 = unlimited, 0 or unset = 5 (default)
-	SucceededRevisionLimit int32 `json:"succeededRevisionLimit,omitempty"`
-}
-
-// StaticPodOperatorStatus is status for controllers that manage static pods. There are different needs because individual
-// node status must be tracked.
-type StaticPodOperatorStatus struct {
-	OperatorStatus `json:",inline"`
-
-	// latestAvailableRevision is the deploymentID of the most recent deployment
-	// +optional
-	LatestAvailableRevision int32 `json:"latestAvailableRevision,omitempty"`
-
-	// latestAvailableRevisionReason describes the detailed reason for the most recent deployment
-	// +optional
-	LatestAvailableRevisionReason string `json:"latestAvailableRevisionReason,omitempty"`
-
-	// nodeStatuses track the deployment values and errors across individual nodes
-	// +optional
-	NodeStatuses []NodeStatus `json:"nodeStatuses,omitempty"`
-}
-
-// NodeStatus provides information about the current state of a particular node managed by this operator.
-type NodeStatus struct {
-	// nodeName is the name of the node
-	NodeName string `json:"nodeName"`
-
-	// currentRevision is the generation of the most recently successful deployment
-	CurrentRevision int32 `json:"currentRevision"`
-	// targetRevision is the generation of the deployment we're trying to apply
-	TargetRevision int32 `json:"targetRevision,omitempty"`
-
-	// lastFailedRevision is the generation of the deployment we tried and failed to deploy.
-	LastFailedRevision int32 `json:"lastFailedRevision,omitempty"`
-	// lastFailedTime is the time the last failed revision failed.
-	LastFailedTime *metav1.Time `json:"lastFailedTime,omitempty"`
-	// lastFailedReason is a machine readable failure reason string.
-	LastFailedReason string `json:"lastFailedReason,omitempty"`
-	// lastFailedCount is how often the installer pod of the last failed revision failed.
-	LastFailedCount int `json:"lastFailedCount,omitempty"`
-	// lastFallbackCount is how often a fallback to a previous revision happened.
-	LastFallbackCount int `json:"lastFallbackCount,omitempty"`
-	// lastFailedRevisionErrors is a list of human readable errors during the failed deployment referenced in lastFailedRevision.
-	LastFailedRevisionErrors []string `json:"lastFailedRevisionErrors,omitempty"`
-}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_authentication.go b/vendor/github.com/openshift/api/operator/v1/types_authentication.go
deleted file mode 100644
index 80aa55f39..000000000
--- a/vendor/github.com/openshift/api/operator/v1/types_authentication.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package v1
-
-import (
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Authentication provides information to configure an operator to manage authentication.
-//
-// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
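For orientation while reviewing the removal, here is a minimal, self-contained sketch (not part of this repository) of how a consumer typically acts on the OperatorSpec fields deleted above; the verbosity mapping follows the glog hints in the LogLevel comments, and the gating helper is illustrative only:

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

// verbosity maps the intent-based LogLevel to a klog-style -v level,
// following the glog hints in the LogLevel comments (Normal~2, Debug~4,
// Trace~6, TraceAll~8).
func verbosity(l operatorv1.LogLevel) int {
	switch l {
	case operatorv1.Debug:
		return 4
	case operatorv1.Trace:
		return 6
	case operatorv1.TraceAll:
		return 8
	default: // "" or Normal
		return 2
	}
}

// shouldAct reports whether a reconciler should touch its operand at all:
// Unmanaged means hands off, while Managed, Force, and Removed all still
// require the operator to act (Removed actively deletes the operand).
func shouldAct(s operatorv1.OperatorSpec) bool {
	return s.ManagementState != operatorv1.Unmanaged
}

func main() {
	spec := operatorv1.OperatorSpec{
		ManagementState: operatorv1.Managed,
		LogLevel:        operatorv1.Debug,
	}
	fmt.Println(shouldAct(spec), verbosity(spec.LogLevel)) // true 4
}

In real operators, comparable management-state gating usually lives in shared helpers (for example in openshift/library-go) rather than being hand-rolled like this.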
-// +openshift:compatibility-gen:level=1 -type Authentication struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // +kubebuilder:validation:Required - // +required - Spec AuthenticationSpec `json:"spec,omitempty"` - // +optional - Status AuthenticationStatus `json:"status,omitempty"` -} - -type AuthenticationSpec struct { - OperatorSpec `json:",inline"` -} - -type AuthenticationStatus struct { - // OAuthAPIServer holds status specific only to oauth-apiserver - // +optional - OAuthAPIServer OAuthAPIServerStatus `json:"oauthAPIServer,omitempty"` - - OperatorStatus `json:",inline"` -} - -type OAuthAPIServerStatus struct { - // LatestAvailableRevision is the latest revision used as suffix of revisioned - // secrets like encryption-config. A new revision causes a new deployment of pods. - // +optional - // +kubebuilder:validation:Minimum=0 - LatestAvailableRevision int32 `json:"latestAvailableRevision,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// AuthenticationList is a collection of items -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type AuthenticationList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - - Items []Authentication `json:"items"` -} diff --git a/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go b/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go deleted file mode 100644 index 8ad336fa2..000000000 --- a/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go +++ /dev/null @@ -1,81 +0,0 @@ -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CloudCredential provides a means to configure an operator to manage CredentialsRequests. -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type CloudCredential struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // +kubebuilder:validation:Required - // +required - Spec CloudCredentialSpec `json:"spec"` - // +optional - Status CloudCredentialStatus `json:"status"` -} - -// CloudCredentialsMode is the specified mode the cloud-credential-operator -// should reconcile CredentialsRequest with -// +kubebuilder:validation:Enum="";Manual;Mint;Passthrough -type CloudCredentialsMode string - -const ( - // CloudCredentialsModeManual tells cloud-credential-operator to not reconcile any CredentialsRequests - // (primarily used for the disconnected VPC use-cases). - CloudCredentialsModeManual CloudCredentialsMode = "Manual" - - // CloudCredentialsModeMint tells cloud-credential-operator to reconcile all CredentialsRequests - // by minting new users/credentials. - CloudCredentialsModeMint CloudCredentialsMode = "Mint" - - // CloudCredentialsModePassthrough tells cloud-credential-operator to reconcile all CredentialsRequests - // by copying the cloud-specific secret data. 
- CloudCredentialsModePassthrough CloudCredentialsMode = "Passthrough" - - // CloudCredentialsModeDefault puts CCO into the default mode of operation (per-cloud/platform defaults): - // AWS/Azure/GCP: dynamically determine cluster's cloud credential capabilities to affect - // processing of CredentialsRequests - // All other clouds/platforms (OpenStack, oVirt, vSphere, etc): run in "passthrough" mode - CloudCredentialsModeDefault CloudCredentialsMode = "" -) - -// CloudCredentialSpec is the specification of the desired behavior of the cloud-credential-operator. -type CloudCredentialSpec struct { - OperatorSpec `json:",inline"` - // CredentialsMode allows informing CCO that it should not attempt to dynamically - // determine the root cloud credentials capabilities, and it should just run in - // the specified mode. - // It also allows putting the operator into "manual" mode if desired. - // Leaving the field in default mode runs CCO so that the cluster's cloud credentials - // will be dynamically probed for capabilities (on supported clouds/platforms). - // Supported modes: - // AWS/Azure/GCP: "" (Default), "Mint", "Passthrough", "Manual" - // Others: Do not set value as other platforms only support running in "Passthrough" - // +optional - CredentialsMode CloudCredentialsMode `json:"credentialsMode,omitempty"` -} - -// CloudCredentialStatus defines the observed status of the cloud-credential-operator. -type CloudCredentialStatus struct { - OperatorStatus `json:",inline"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type CloudCredentialList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - - Items []CloudCredential `json:"items"` -} diff --git a/vendor/github.com/openshift/api/operator/v1/types_config.go b/vendor/github.com/openshift/api/operator/v1/types_config.go deleted file mode 100644 index 89a6975ac..000000000 --- a/vendor/github.com/openshift/api/operator/v1/types_config.go +++ /dev/null @@ -1,49 +0,0 @@ -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Config provides information to configure the config operator. It handles installation, migration or synchronization of cloud based cluster configurations like AWS or Azure. -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type Config struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - - // spec is the specification of the desired behavior of the Config Operator. - // +kubebuilder:validation:Required - // +required - Spec ConfigSpec `json:"spec"` - - // status defines the observed status of the Config Operator. - // +optional - Status ConfigStatus `json:"status"` -} - -type ConfigSpec struct { - OperatorSpec `json:",inline"` -} - -type ConfigStatus struct { - OperatorStatus `json:",inline"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ConfigList is a collection of items -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
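To make the CloudCredentialsMode field above concrete, a small hedged sketch; the mode constants come straight from the deleted types, while the object name "cluster" is the conventional singleton and should be treated as an assumption here:

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Pin the cloud-credential-operator to Manual mode instead of letting it
	// probe the root credential's capabilities (per the docs, primarily for
	// disconnected VPC use-cases).
	cc := operatorv1.CloudCredential{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
		Spec: operatorv1.CloudCredentialSpec{
			CredentialsMode: operatorv1.CloudCredentialsModeManual,
		},
	}
	fmt.Println(cc.Name, cc.Spec.CredentialsMode) // cluster Manual
}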
-// +openshift:compatibility-gen:level=1
-type ConfigList struct {
-	metav1.TypeMeta `json:",inline"`
-	metav1.ListMeta `json:"metadata"`
-
-	// Items contains the items
-	Items []Config `json:"items"`
-}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_console.go b/vendor/github.com/openshift/api/operator/v1/types_console.go
deleted file mode 100644
index a01333b7c..000000000
--- a/vendor/github.com/openshift/api/operator/v1/types_console.go
+++ /dev/null
@@ -1,233 +0,0 @@
-package v1
-
-import (
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-	configv1 "github.com/openshift/api/config/v1"
-)
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Console provides a means to configure an operator to manage the console.
-//
-// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
-// +openshift:compatibility-gen:level=1
-type Console struct {
-	metav1.TypeMeta `json:",inline"`
-	metav1.ObjectMeta `json:"metadata,omitempty"`
-
-	// +kubebuilder:validation:Required
-	// +required
-	Spec ConsoleSpec `json:"spec,omitempty"`
-	// +optional
-	Status ConsoleStatus `json:"status,omitempty"`
-}
-
-// ConsoleSpec is the specification of the desired behavior of the Console.
-type ConsoleSpec struct {
-	OperatorSpec `json:",inline"`
-	// customization is used to optionally provide a small set of
-	// customization options to the web console.
-	// +optional
-	Customization ConsoleCustomization `json:"customization"`
-	// providers contains configuration for using specific service providers.
-	Providers ConsoleProviders `json:"providers"`
-	// route contains hostname and secret reference that contains the serving certificate.
-	// If a custom route is specified, a new route will be created with the
-	// provided hostname, under which console will be available.
-	// If the custom hostname uses the default routing suffix of the cluster,
-	// the Secret specification for a serving certificate will not be needed.
-	// If the custom hostname points to an arbitrary domain, manual DNS configuration steps are necessary.
-	// The default console route will be maintained to reserve the default hostname
-	// for console if the custom route is removed.
-	// If not specified, default route will be used.
-	// DEPRECATED
-	// +optional
-	Route ConsoleConfigRoute `json:"route"`
-	// plugins defines a list of enabled console plugin names.
-	// +optional
-	Plugins []string `json:"plugins,omitempty"`
-}
-
-// ConsoleConfigRoute holds information on external route access to console.
-// DEPRECATED
-type ConsoleConfigRoute struct {
-	// hostname is the desired custom domain under which console will be available.
-	Hostname string `json:"hostname"`
-	// secret points to a secret in the openshift-config namespace that contains custom
-	// certificate and key and needs to be created manually by the cluster admin.
-	// Referenced Secret is required to contain the following key value pairs:
-	// - "tls.crt" - specifies the custom certificate
-	// - "tls.key" - specifies the private key of the custom certificate
-	// If the custom hostname uses the default routing suffix of the cluster,
-	// the Secret specification for a serving certificate will not be needed.
-	// +optional
-	Secret configv1.SecretNameReference `json:"secret"`
-}
-
-// ConsoleStatus defines the observed status of the Console.
-type ConsoleStatus struct {
-	OperatorStatus `json:",inline"`
-}
-
-// ConsoleProviders defines a list of optional additional providers of
-// functionality to the console.
-type ConsoleProviders struct {
-	// statuspage contains ID for statuspage.io page that provides status info.
-	// +optional
-	Statuspage *StatuspageProvider `json:"statuspage,omitempty"`
-}
-
-// StatuspageProvider provides identity for statuspage account.
-type StatuspageProvider struct {
-	// pageID is the unique ID assigned by Statuspage for your page. This must be a public page.
-	PageID string `json:"pageID"`
-}
-
-// ConsoleCustomization defines a list of optional configuration for the console UI.
-type ConsoleCustomization struct {
-	// brand is the default branding of the web console which can be overridden by
-	// providing the brand field. There is a limited set of specific brand options.
-	// This field controls elements of the console such as the logo.
-	// An invalid value will prevent a console rollout.
-	Brand Brand `json:"brand,omitempty"`
-	// documentationBaseURL links to the external documentation that is shown in various sections
-	// of the web console. Providing documentationBaseURL will override the default
-	// documentation URL.
-	// An invalid value will prevent a console rollout.
-	// +kubebuilder:validation:Pattern=`^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))\/$`
-	DocumentationBaseURL string `json:"documentationBaseURL,omitempty"`
-	// customProductName is the name that will be displayed in page titles, logo alt text, and the about dialog
-	// instead of the normal OpenShift product name.
-	// +optional
-	CustomProductName string `json:"customProductName,omitempty"`
-	// customLogoFile replaces the default OpenShift logo in the masthead and about dialog. It is a reference to a
-	// ConfigMap in the openshift-config namespace. This can be created with a command like
-	// 'oc create configmap custom-logo --from-file=/path/to/file -n openshift-config'.
-	// Image size must be less than 1 MB due to constraints on the ConfigMap size.
-	// The ConfigMap key should include a file extension so that the console serves the file
-	// with the correct MIME type.
-	// Recommended logo specifications:
-	// Dimensions: Max height of 68px and max width of 200px
-	// SVG format preferred
-	// +optional
-	CustomLogoFile configv1.ConfigMapFileReference `json:"customLogoFile,omitempty"`
-	// developerCatalog allows configuring the developer catalog categories that are shown.
-	// +kubebuilder:validation:Optional
-	// +optional
-	DeveloperCatalog DeveloperConsoleCatalogCustomization `json:"developerCatalog,omitempty"`
-	// projectAccess allows customizing the available list of ClusterRoles in the Developer perspective
-	// Project access page which can be used by a project admin to specify roles to other users and
-	// restrict access within the project. If set, the list will replace the default ClusterRole options.
-	// +kubebuilder:validation:Optional
-	// +optional
-	ProjectAccess ProjectAccess `json:"projectAccess,omitempty"`
-	// quickStarts allows customization of available ConsoleQuickStart resources in console.
-	// +kubebuilder:validation:Optional
-	// +optional
-	QuickStarts QuickStarts `json:"quickStarts,omitempty"`
-	// addPage allows customizing actions on the Add page in developer perspective.
-	// +kubebuilder:validation:Optional
-	// +optional
-	AddPage AddPage `json:"addPage,omitempty"`
-}
-
-// ProjectAccess contains options for project access roles
-type ProjectAccess struct {
-	// availableClusterRoles is the list of ClusterRole names that are assignable to users
-	// through the project access tab.
-	// +kubebuilder:validation:Optional
-	// +optional
-	AvailableClusterRoles []string `json:"availableClusterRoles,omitempty"`
-}
-
-// DeveloperConsoleCatalogCustomization allows cluster admins to configure the developer catalog.
-type DeveloperConsoleCatalogCustomization struct {
-	// categories which are shown in the developer catalog.
-	// +kubebuilder:validation:Optional
-	// +optional
-	Categories []DeveloperConsoleCatalogCategory `json:"categories,omitempty"`
-}
-
-// DeveloperConsoleCatalogCategoryMeta are the key identifiers of a developer catalog category.
-type DeveloperConsoleCatalogCategoryMeta struct {
-	// ID is an identifier used in the URL to enable deep linking in console.
-	// ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters.
-	// +kubebuilder:validation:Required
-	// +kubebuilder:validation:MinLength=1
-	// +kubebuilder:validation:MaxLength=32
-	// +kubebuilder:validation:Pattern=`^[A-Za-z0-9-_]+$`
-	// +required
-	ID string `json:"id"`
-	// label defines a category display label. It is required and must have 1-64 characters.
-	// +kubebuilder:validation:Required
-	// +kubebuilder:validation:MinLength=1
-	// +kubebuilder:validation:MaxLength=64
-	// +required
-	Label string `json:"label"`
-	// tags is a list of strings that will match the category. A selected category
-	// shows all items which have at least one overlapping tag between category and item.
-	// +kubebuilder:validation:Optional
-	// +optional
-	Tags []string `json:"tags,omitempty"`
-}
-
-// DeveloperConsoleCatalogCategory for the developer console catalog.
-type DeveloperConsoleCatalogCategory struct {
-	// defines top level category ID, label and filter tags.
-	DeveloperConsoleCatalogCategoryMeta `json:",inline"`
-	// subcategories defines a list of child categories.
-	// +kubebuilder:validation:Optional
-	// +optional
-	Subcategories []DeveloperConsoleCatalogCategoryMeta `json:"subcategories,omitempty"`
-}
-
-// QuickStarts allow cluster admins to customize available ConsoleQuickStart resources.
-type QuickStarts struct {
-	// disabled is a list of ConsoleQuickStart resource names that are not shown to users.
-	// +kubebuilder:validation:Optional
-	// +optional
-	Disabled []string `json:"disabled,omitempty"`
-}
-
-// AddPage allows customizing actions on the Add page in developer perspective.
-type AddPage struct {
-	// disabledActions is a list of actions that are not shown to users.
-	// Each action in the list is represented by its ID.
-	// +kubebuilder:validation:Optional
-	// +kubebuilder:validation:MinItems=1
-	// +optional
-	DisabledActions []string `json:"disabledActions,omitempty"`
-}
-
-// Brand is a specific supported brand within the console.
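To make the customization surface concrete, a small hypothetical sketch using only the types defined above; the product name, quick-start name, category ID, label, and tag are invented values:

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	// Rebrand the console, hide one quick start, and trim the developer
	// catalog to a single custom category.
	custom := operatorv1.ConsoleCustomization{
		Brand:             operatorv1.BrandOKD,
		CustomProductName: "Example Console",
		QuickStarts: operatorv1.QuickStarts{
			Disabled: []string{"sample-quickstart"},
		},
		DeveloperCatalog: operatorv1.DeveloperConsoleCatalogCustomization{
			Categories: []operatorv1.DeveloperConsoleCatalogCategory{{
				DeveloperConsoleCatalogCategoryMeta: operatorv1.DeveloperConsoleCatalogCategoryMeta{
					ID:    "databases",
					Label: "Databases",
					Tags:  []string{"database"},
				},
			}},
		},
	}
	fmt.Println(custom.Brand, custom.CustomProductName)
}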
-// +kubebuilder:validation:Pattern=`^$|^(ocp|origin|okd|dedicated|online|azure)$` -type Brand string - -const ( - // Branding for OpenShift - BrandOpenShift Brand = "openshift" - // Branding for The Origin Community Distribution of Kubernetes - BrandOKD Brand = "okd" - // Branding for OpenShift Online - BrandOnline Brand = "online" - // Branding for OpenShift Container Platform - BrandOCP Brand = "ocp" - // Branding for OpenShift Dedicated - BrandDedicated Brand = "dedicated" - // Branding for Azure Red Hat OpenShift - BrandAzure Brand = "azure" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type ConsoleList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - - Items []Console `json:"items"` -} diff --git a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go deleted file mode 100644 index 10bd8b911..000000000 --- a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go +++ /dev/null @@ -1,82 +0,0 @@ -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// ClusterCSIDriver is used to manage and configure CSI driver installed by default -// in OpenShift. An example configuration may look like: -// apiVersion: operator.openshift.io/v1 -// kind: "ClusterCSIDriver" -// metadata: -// name: "ebs.csi.aws.com" -// spec: -// logLevel: Debug - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ClusterCSIDriver object allows management and configuration of a CSI driver operator -// installed by default in OpenShift. Name of the object must be name of the CSI driver -// it operates. See CSIDriverName type for list of allowed values. -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type ClusterCSIDriver struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // spec holds user settable values for configuration - // +kubebuilder:validation:Required - // +required - Spec ClusterCSIDriverSpec `json:"spec"` - - // status holds observed values from the cluster. They may not be overridden. - // +optional - Status ClusterCSIDriverStatus `json:"status"` -} - -// CSIDriverName is the name of the CSI driver -type CSIDriverName string - -// If you are adding a new driver name here, ensure that 0000_90_cluster_csi_driver_01_config.crd.yaml-merge-patch file is also updated with new driver name. 
-const ( - AWSEBSCSIDriver CSIDriverName = "ebs.csi.aws.com" - AWSEFSCSIDriver CSIDriverName = "efs.csi.aws.com" - AzureDiskCSIDriver CSIDriverName = "disk.csi.azure.com" - AzureFileCSIDriver CSIDriverName = "file.csi.azure.com" - GCPPDCSIDriver CSIDriverName = "pd.csi.storage.gke.io" - CinderCSIDriver CSIDriverName = "cinder.csi.openstack.org" - VSphereCSIDriver CSIDriverName = "csi.vsphere.vmware.com" - ManilaCSIDriver CSIDriverName = "manila.csi.openstack.org" - OvirtCSIDriver CSIDriverName = "csi.ovirt.org" - KubevirtCSIDriver CSIDriverName = "csi.kubevirt.io" - SharedResourcesCSIDriver CSIDriverName = "csi.sharedresource.openshift.io" - AlibabaDiskCSIDriver CSIDriverName = "diskplugin.csi.alibabacloud.com" - IBMVPCBlockCSIDriver CSIDriverName = "vpc.block.csi.ibm.io" - IBMPowerVSBlockCSIDriver CSIDriverName = "powervs.csi.ibm.com" -) - -// ClusterCSIDriverSpec is the desired behavior of CSI driver operator -type ClusterCSIDriverSpec struct { - OperatorSpec `json:",inline"` -} - -// ClusterCSIDriverStatus is the observed status of CSI driver operator -type ClusterCSIDriverStatus struct { - OperatorStatus `json:",inline"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:object:root=true - -// ClusterCSIDriverList contains a list of ClusterCSIDriver -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type ClusterCSIDriverList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []ClusterCSIDriver `json:"items"` -} diff --git a/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go b/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go deleted file mode 100644 index 21db5df0a..000000000 --- a/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go +++ /dev/null @@ -1,50 +0,0 @@ -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CSISnapshotController provides a means to configure an operator to manage the CSI snapshots. `cluster` is the canonical name. -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type CSISnapshotController struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // spec holds user settable values for configuration - // +kubebuilder:validation:Required - // +required - Spec CSISnapshotControllerSpec `json:"spec"` - - // status holds observed values from the cluster. They may not be overridden. - // +optional - Status CSISnapshotControllerStatus `json:"status"` -} - -// CSISnapshotControllerSpec is the specification of the desired behavior of the CSISnapshotController operator. -type CSISnapshotControllerSpec struct { - OperatorSpec `json:",inline"` -} - -// CSISnapshotControllerStatus defines the observed status of the CSISnapshotController operator. -type CSISnapshotControllerStatus struct { - OperatorStatus `json:",inline"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:object:root=true - -// CSISnapshotControllerList contains a list of CSISnapshotControllers. 
-// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type CSISnapshotControllerList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []CSISnapshotController `json:"items"` -} diff --git a/vendor/github.com/openshift/api/operator/v1/types_dns.go b/vendor/github.com/openshift/api/operator/v1/types_dns.go deleted file mode 100644 index 4221df78a..000000000 --- a/vendor/github.com/openshift/api/operator/v1/types_dns.go +++ /dev/null @@ -1,429 +0,0 @@ -package v1 - -import ( - v1 "github.com/openshift/api/config/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - corev1 "k8s.io/api/core/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:object:root=true -// +kubebuilder:resource:path=dnses,scope=Cluster -// +kubebuilder:subresource:status - -// DNS manages the CoreDNS component to provide a name resolution service -// for pods and services in the cluster. -// -// This supports the DNS-based service discovery specification: -// https://github.com/kubernetes/dns/blob/master/docs/specification.md -// -// More details: https://kubernetes.io/docs/tasks/administer-cluster/coredns -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type DNS struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // spec is the specification of the desired behavior of the DNS. - Spec DNSSpec `json:"spec,omitempty"` - // status is the most recently observed status of the DNS. - Status DNSStatus `json:"status,omitempty"` -} - -// DNSSpec is the specification of the desired behavior of the DNS. -type DNSSpec struct { - // servers is a list of DNS resolvers that provide name query delegation for one or - // more subdomains outside the scope of the cluster domain. If servers consists of - // more than one Server, longest suffix match will be used to determine the Server. - // - // For example, if there are two Servers, one for "foo.com" and another for "a.foo.com", - // and the name query is for "www.a.foo.com", it will be routed to the Server with Zone - // "a.foo.com". - // - // If this field is nil, no servers are created. - // - // +optional - Servers []Server `json:"servers,omitempty"` - - // upstreamResolvers defines a schema for configuring CoreDNS - // to proxy DNS messages to upstream resolvers for the case of the - // default (".") server - // - // If this field is not specified, the upstream used will default to - // /etc/resolv.conf, with policy "sequential" - // - // +optional - UpstreamResolvers UpstreamResolvers `json:"upstreamResolvers"` - - // nodePlacement provides explicit control over the scheduling of DNS - // pods. - // - // Generally, it is useful to run a DNS pod on every node so that DNS - // queries are always handled by a local DNS pod instead of going over - // the network to a DNS pod on another node. However, security policies - // may require restricting the placement of DNS pods to specific nodes. - // For example, if a security policy prohibits pods on arbitrary nodes - // from communicating with the API, a node selector can be specified to - // restrict DNS pods to nodes that are permitted to communicate with the - // API. 
Conversely, if running DNS pods on nodes with a particular - // taint is desired, a toleration can be specified for that taint. - // - // If unset, defaults are used. See nodePlacement for more details. - // - // +optional - NodePlacement DNSNodePlacement `json:"nodePlacement,omitempty"` - - // managementState indicates whether the DNS operator should manage cluster - // DNS - // +optional - ManagementState ManagementState `json:"managementState,omitempty"` - - // operatorLogLevel controls the logging level of the DNS Operator. - // Valid values are: "Normal", "Debug", "Trace". - // Defaults to "Normal". - // setting operatorLogLevel: Trace will produce extremely verbose logs. - // +optional - // +kubebuilder:default=Normal - OperatorLogLevel DNSLogLevel `json:"operatorLogLevel,omitempty"` - - // logLevel describes the desired logging verbosity for CoreDNS. - // Any one of the following values may be specified: - // * Normal logs errors from upstream resolvers. - // * Debug logs errors, NXDOMAIN responses, and NODATA responses. - // * Trace logs errors and all responses. - // Setting logLevel: Trace will produce extremely verbose logs. - // Valid values are: "Normal", "Debug", "Trace". - // Defaults to "Normal". - // +optional - // +kubebuilder:default=Normal - LogLevel DNSLogLevel `json:"logLevel,omitempty"` -} - -// +kubebuilder:validation:Enum:=Normal;Debug;Trace -type DNSLogLevel string - -var ( - // Normal is the default. Normal, working log information, everything is fine, but helpful notices for auditing or common operations. In kube, this is probably glog=2. - DNSLogLevelNormal DNSLogLevel = "Normal" - - // Debug is used when something went wrong. Even common operations may be logged, and less helpful but more quantity of notices. In kube, this is probably glog=4. - DNSLogLevelDebug DNSLogLevel = "Debug" - - // Trace is used when something went really badly and even more verbose logs are needed. Logging every function call as part of a common operation, to tracing execution of a query. In kube, this is probably glog=6. - DNSLogLevelTrace DNSLogLevel = "Trace" -) - -// Server defines the schema for a server that runs per instance of CoreDNS. -type Server struct { - // name is required and specifies a unique name for the server. Name must comply - // with the Service Name Syntax of rfc6335. - Name string `json:"name"` - // zones is required and specifies the subdomains that Server is authoritative for. - // Zones must conform to the rfc1123 definition of a subdomain. Specifying the - // cluster domain (i.e., "cluster.local") is invalid. - Zones []string `json:"zones"` - // forwardPlugin defines a schema for configuring CoreDNS to proxy DNS messages - // to upstream resolvers. - ForwardPlugin ForwardPlugin `json:"forwardPlugin"` -} - -// DNSTransport indicates what type of connection should be used. -// +kubebuilder:validation:Enum=TLS;Cleartext;"" -type DNSTransport string - -const ( - // TLSTransport indicates that TLS should be used for the connection. - TLSTransport DNSTransport = "TLS" - - // CleartextTransport indicates that no encryption should be used for - // the connection. - CleartextTransport DNSTransport = "Cleartext" -) - -// DNSTransportConfig groups related configuration parameters used for configuring -// forwarding to upstream resolvers that support DNS-over-TLS. -// +union -type DNSTransportConfig struct { - // transport allows cluster administrators to opt-in to using a DNS-over-TLS - // connection between cluster DNS and an upstream resolver(s). 
Configuring - // TLS as the transport at this level without configuring a CABundle will - // result in the system certificates being used to verify the serving - // certificate of the upstream resolver(s). - // - // Possible values: - // "" (empty) - This means no explicit choice has been made and the platform chooses the default which is subject - // to change over time. The current default is "Cleartext". - // "Cleartext" - Cluster admin specified cleartext option. This results in the same functionality - // as an empty value but may be useful when a cluster admin wants to be more explicit about the transport, - // or wants to switch from "TLS" to "Cleartext" explicitly. - // "TLS" - This indicates that DNS queries should be sent over a TLS connection. If Transport is set to TLS, - // you MUST also set ServerName. If a port is not included with the upstream IP, port 853 will be tried by default - // per RFC 7858 section 3.1; https://datatracker.ietf.org/doc/html/rfc7858#section-3.1. - // - // +optional - // +unionDiscriminator - Transport DNSTransport `json:"transport,omitempty"` - - // tls contains the additional configuration options to use when Transport is set to "TLS". - TLS *DNSOverTLSConfig `json:"tls,omitempty"` -} - -// DNSOverTLSConfig describes optional DNSTransportConfig fields that should be captured. -type DNSOverTLSConfig struct { - // serverName is the upstream server to connect to when forwarding DNS queries. This is required when Transport is - // set to "TLS". ServerName will be validated against the DNS naming conventions in RFC 1123 and should match the - // TLS certificate installed in the upstream resolver(s). - // - // + --- - // + Inspired by the DNS1123 patterns in Kubernetes: https://github.com/kubernetes/kubernetes/blob/7c46f40bdf89a437ecdbc01df45e235b5f6d9745/staging/src/k8s.io/apimachinery/pkg/util/validation/validation.go#L178-L218 - // +kubebuilder:validation:Required - // +kubebuilder:validation:MaxLength=253 - // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$` - ServerName string `json:"serverName"` - - // caBundle references a ConfigMap that must contain either a single - // CA Certificate or a CA Bundle. This allows cluster administrators to provide their - // own CA or CA bundle for validating the certificate of upstream resolvers. - // - // 1. The configmap must contain a `ca-bundle.crt` key. - // 2. The value must be a PEM encoded CA certificate or CA bundle. - // 3. The administrator must create this configmap in the openshift-config namespace. - // 4. The upstream server certificate must contain a Subject Alternative Name (SAN) that matches ServerName. - // - // +optional - CABundle v1.ConfigMapNameReference `json:"caBundle,omitempty"` -} - -// ForwardingPolicy is the policy to use when forwarding DNS requests. -// +kubebuilder:validation:Enum=Random;RoundRobin;Sequential -type ForwardingPolicy string - -const ( - // RandomForwardingPolicy picks a random upstream server for each query. - RandomForwardingPolicy ForwardingPolicy = "Random" - - // RoundRobinForwardingPolicy picks upstream servers in a round-robin order, moving to the next server for each new query. - RoundRobinForwardingPolicy ForwardingPolicy = "RoundRobin" - - // SequentialForwardingPolicy tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query. 
-	SequentialForwardingPolicy ForwardingPolicy = "Sequential"
-)
-
-// ForwardPlugin defines a schema for configuring the CoreDNS forward plugin.
-type ForwardPlugin struct {
-	// upstreams is a list of resolvers to forward name queries for subdomains of Zones.
-	// Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream
-	// returns an error during the exchange, another resolver is tried from Upstreams. The
-	// Upstreams are selected in the order specified in Policy. Each upstream is represented
-	// by an IP address or IP:port if the upstream listens on a port other than 53.
-	//
-	// A maximum of 15 upstreams is allowed per ForwardPlugin.
-	//
-	// +kubebuilder:validation:MaxItems=15
-	Upstreams []string `json:"upstreams"`
-
-	// policy is used to determine the order in which upstream servers are selected for querying.
-	// Any one of the following values may be specified:
-	//
-	// * "Random" picks a random upstream server for each query.
-	// * "RoundRobin" picks upstream servers in a round-robin order, moving to the next server for each new query.
-	// * "Sequential" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.
-	//
-	// The default value is "Random".
-	//
-	// +optional
-	// +kubebuilder:default:="Random"
-	Policy ForwardingPolicy `json:"policy,omitempty"`
-
-	// transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use
-	// when forwarding DNS requests to an upstream resolver.
-	//
-	// The default value is "" (empty) which results in a standard cleartext connection being used when forwarding DNS
-	// requests to an upstream resolver.
-	//
-	// +optional
-	TransportConfig DNSTransportConfig `json:"transportConfig,omitempty"`
-}
-
-// UpstreamResolvers defines a schema for configuring the CoreDNS forward plugin in the
-// specific case of the default (".") server.
-// It differs from ForwardPlugin in the default values it accepts:
-// * At least one upstream should be specified.
-// * The default policy is Sequential.
-type UpstreamResolvers struct {
-	// Upstreams is a list of resolvers to forward name queries for the "." domain.
-	// Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream
-	// returns an error during the exchange, another resolver is tried from Upstreams. The
-	// Upstreams are selected in the order specified in Policy.
-	//
-	// A maximum of 15 upstreams is allowed per ForwardPlugin.
-	// If no Upstreams are specified, /etc/resolv.conf is used by default.
-	//
-	// +optional
-	// +kubebuilder:validation:MaxItems=15
-	// +kubebuilder:default={{"type":"SystemResolvConf"}}
-	Upstreams []Upstream `json:"upstreams"`
-
-	// Policy is used to determine the order in which upstream servers are selected for querying.
-	// Any one of the following values may be specified:
-	//
-	// * "Random" picks a random upstream server for each query.
-	// * "RoundRobin" picks upstream servers in a round-robin order, moving to the next server for each new query.
-	// * "Sequential" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.
-	//
-	// The default value is "Sequential".
-	//
-	// +optional
-	// +kubebuilder:default="Sequential"
-	Policy ForwardingPolicy `json:"policy,omitempty"`
-
-	// transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use
-	// when forwarding DNS requests to an upstream resolver.
-	//
-	// The default value is "" (empty) which results in a standard cleartext connection being used when forwarding DNS
-	// requests to an upstream resolver.
-	//
-	// +optional
-	TransportConfig DNSTransportConfig `json:"transportConfig,omitempty"`
-}
-
-// Upstream can either be of type SystemResolvConf, or of type Network.
-//
-// * For an Upstream of type SystemResolvConf, no further fields are necessary:
-//   The upstream will be configured to use /etc/resolv.conf.
-// * For an Upstream of type Network, a NetworkResolver field needs to be defined
-//   with an IP address or IP:port if the upstream listens on a port other than 53.
-type Upstream struct {
-
-	// Type defines whether this upstream contains an IP/IP:port resolver or the local /etc/resolv.conf.
-	// Type accepts 2 possible values: SystemResolvConf or Network.
-	//
-	// * When SystemResolvConf is used, the Upstream structure does not require any further fields to be defined:
-	//   /etc/resolv.conf will be used.
-	// * When Network is used, the Upstream structure must contain at least an Address.
-	//
-	// +kubebuilder:validation:Required
-	// +required
-	Type UpstreamType `json:"type"`
-
-	// Address must be defined when Type is set to Network. It will be ignored otherwise.
-	// It must be a valid ipv4 or ipv6 address.
-	//
-	// +optional
-	// +kubebuilder:validation:Optional
-	Address string `json:"address,omitempty"`
-
-	// Port may be defined when Type is set to Network. It will be ignored otherwise.
-	// Port must be between 1 and 65535.
-	//
-	// +optional
-	// +kubebuilder:validation:Minimum=1
-	// +kubebuilder:validation:Maximum=65535
-	// +kubebuilder:validation:Optional
-	// +kubebuilder:default=53
-	Port uint32 `json:"port,omitempty"`
-}
-
-// +kubebuilder:validation:Enum=SystemResolvConf;Network;""
-type UpstreamType string
-
-const (
-	SystemResolveConfType UpstreamType = "SystemResolvConf"
-	NetworkResolverType   UpstreamType = "Network"
-)
-
-// DNSNodePlacement describes the node scheduling configuration for DNS pods.
-type DNSNodePlacement struct {
-	// nodeSelector is the node selector applied to DNS pods.
-	//
-	// If empty, the default is used, which is currently the following:
-	//
-	//   kubernetes.io/os: linux
-	//
-	// This default is subject to change.
-	//
-	// If set, the specified selector is used and replaces the default.
-	//
-	// +optional
-	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
-
-	// tolerations is a list of tolerations applied to DNS pods.
-	//
-	// If empty, the DNS operator sets a toleration for the
-	// "node-role.kubernetes.io/master" taint. This default is subject to
-	// change. Specifying tolerations without including a toleration for
-	// the "node-role.kubernetes.io/master" taint may be risky as it could
-	// lead to an outage if all worker nodes become unavailable.
-	//
-	// Note that the daemon controller adds some tolerations as well. See
-	// https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
-	//
-	// +optional
-	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
-}
-
-const (
-	// Available indicates the DNS controller daemonset is available.
-	DNSAvailable = "Available"
-)
-
-// DNSStatus defines the observed status of the DNS.
-type DNSStatus struct {
-	// clusterIP is the service IP through which this DNS is made available.
-	//
-	// In the case of the default DNS, this will be a well known IP that is used
-	// as the default nameserver for pods that are using the default ClusterFirst DNS policy.
-	//
-	// In general, this IP can be specified in a pod's spec.dnsConfig.nameservers list
-	// or used explicitly when performing name resolution from within the cluster.
-	// Example: dig foo.com @<service IP>
-	//
-	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-	//
-	// +kubebuilder:validation:Required
-	// +required
-	ClusterIP string `json:"clusterIP"`
-
-	// clusterDomain is the local cluster DNS domain suffix for DNS services.
-	// This will be a subdomain as defined in RFC 1034,
-	// section 3.5: https://tools.ietf.org/html/rfc1034#section-3.5
-	// Example: "cluster.local"
-	//
-	// More info: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service
-	//
-	// +kubebuilder:validation:Required
-	// +required
-	ClusterDomain string `json:"clusterDomain"`
-
-	// conditions provide information about the state of the DNS on the cluster.
-	//
-	// These are the supported DNS conditions:
-	//
-	//   * Available
-	//   - True if the following conditions are met:
-	//     * DNS controller daemonset is available.
-	//   - False if any of those conditions are unsatisfied.
-	//
-	// +patchMergeKey=type
-	// +patchStrategy=merge
-	// +optional
-	Conditions []OperatorCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +kubebuilder:object:root=true
-
-// DNSList contains a list of DNS
-//
-// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
-// +openshift:compatibility-gen:level=1
-type DNSList struct {
-	metav1.TypeMeta `json:",inline"`
-	metav1.ListMeta `json:"metadata,omitempty"`
-
-	Items []DNS `json:"items"`
-}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_etcd.go b/vendor/github.com/openshift/api/operator/v1/types_etcd.go
deleted file mode 100644
index 6cd593ced..000000000
--- a/vendor/github.com/openshift/api/operator/v1/types_etcd.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package v1
-
-import (
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Etcd provides information to configure an operator to manage etcd.
-//
-// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
-// +openshift:compatibility-gen:level=1
-type Etcd struct {
-	metav1.TypeMeta   `json:",inline"`
-	metav1.ObjectMeta `json:"metadata"`
-
-	// +kubebuilder:validation:Required
-	// +required
-	Spec EtcdSpec `json:"spec"`
-	// +optional
-	Status EtcdStatus `json:"status"`
-}
-
-type EtcdSpec struct {
-	StaticPodOperatorSpec `json:",inline"`
-}
-
-type EtcdStatus struct {
-	StaticPodOperatorStatus `json:",inline"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// EtcdList is a collection of items
-//
-// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
-// +openshift:compatibility-gen:level=1
-type EtcdList struct {
-	metav1.TypeMeta `json:",inline"`
-	metav1.ListMeta `json:"metadata"`
-
-	// Items contains the items
-	Items []Etcd `json:"items"`
-}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_ingress.go b/vendor/github.com/openshift/api/operator/v1/types_ingress.go
deleted file mode 100644
index 6e2968465..000000000
--- a/vendor/github.com/openshift/api/operator/v1/types_ingress.go
+++ /dev/null
@@ -1,1568 +0,0 @@
-package v1
-
-import (
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-
-	corev1 "k8s.io/api/core/v1"
-
-	configv1 "github.com/openshift/api/config/v1"
-)
-
-// +genclient
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +kubebuilder:object:root=true
-// +kubebuilder:subresource:status
-// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.availableReplicas,selectorpath=.status.selector
-
-// IngressController describes a managed ingress controller for the cluster. The
-// controller can service OpenShift Route and Kubernetes Ingress resources.
-//
-// When an IngressController is created, a new ingress controller deployment is
-// created to allow external traffic to reach the services that expose Ingress
-// or Route resources. Updating this resource may lead to disruption for public
-// facing network connections as a new ingress controller revision may be rolled
-// out.
-//
-// https://kubernetes.io/docs/concepts/services-networking/ingress-controllers
-//
-// Whenever possible, sensible defaults for the platform are used. See each
-// field for more details.
-//
-// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
-// +openshift:compatibility-gen:level=1
-type IngressController struct {
-	metav1.TypeMeta   `json:",inline"`
-	metav1.ObjectMeta `json:"metadata,omitempty"`
-
-	// spec is the specification of the desired behavior of the IngressController.
-	Spec IngressControllerSpec `json:"spec,omitempty"`
-	// status is the most recently observed status of the IngressController.
-	Status IngressControllerStatus `json:"status,omitempty"`
-}
-
-// IngressControllerSpec is the specification of the desired behavior of the
-// IngressController.
-type IngressControllerSpec struct {
-	// domain is a DNS name serviced by the ingress controller and is used to
-	// configure multiple features:
-	//
-	// * For the LoadBalancerService endpoint publishing strategy, domain is
-	//   used to configure DNS records. See endpointPublishingStrategy.
-	//
-	// * When using a generated default certificate, the certificate will be valid
-	//   for domain and its subdomains. See defaultCertificate.
-	//
-	// * The value is published to individual Route statuses so that end-users
-	//   know where to target external DNS records.
-	//
-	// domain must be unique among all IngressControllers, and cannot be
-	// updated.
-	//
-	// If empty, defaults to ingress.config.openshift.io/cluster .spec.domain.
-	//
-	// +optional
-	Domain string `json:"domain,omitempty"`
-
-	// httpErrorCodePages specifies a configmap with custom error pages.
-	// The administrator must create this configmap in the openshift-config namespace.
-	// This configmap should have keys in the format "error-page-<error code>.http",
-	// where <error code> is an HTTP error code.
-	// For example, "error-page-503.http" defines an error page for HTTP 503 responses.
-	// Currently only error pages for 503 and 404 responses can be customized.
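// As an aside for illustration (not part of the deleted source): a minimal
// sketch of a spec referencing such a configmap. The configmap name
// "custom-error-pages" is hypothetical; it would carry keys like
// "error-page-503.http" as described above.
//
//	spec := IngressControllerSpec{
//		HttpErrorCodePages: configv1.ConfigMapNameReference{Name: "custom-error-pages"},
//	}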
-	// Each value in the configmap should be the full response, including HTTP headers.
-	// E.g. https://raw.githubusercontent.com/openshift/router/fadab45747a9b30cc3f0a4b41ad2871f95827a93/images/router/haproxy/conf/error-page-503.http
-	// If this field is empty, the ingress controller uses the default error pages.
-	HttpErrorCodePages configv1.ConfigMapNameReference `json:"httpErrorCodePages,omitempty"`
-
-	// replicas is the desired number of ingress controller replicas. If unset,
-	// the default depends on the value of the defaultPlacement field in the
-	// cluster config.openshift.io/v1/ingresses status.
-	//
-	// The value of replicas is set based on the value of a chosen field in the
-	// Infrastructure CR. If defaultPlacement is set to ControlPlane, the
-	// chosen field will be controlPlaneTopology. If it is set to Workers the
-	// chosen field will be infrastructureTopology. Replicas will then be set to 1
-	// or 2 based on whether the chosen field's value is SingleReplica or
-	// HighlyAvailable, respectively.
-	//
-	// These defaults are subject to change.
-	//
-	// +optional
-	Replicas *int32 `json:"replicas,omitempty"`
-
-	// endpointPublishingStrategy is used to publish the ingress controller
-	// endpoints to other networks, enable load balancer integrations, etc.
-	//
-	// If unset, the default is based on
-	// infrastructure.config.openshift.io/cluster .status.platform:
-	//
-	//   AWS:          LoadBalancerService (with External scope)
-	//   Azure:        LoadBalancerService (with External scope)
-	//   GCP:          LoadBalancerService (with External scope)
-	//   IBMCloud:     LoadBalancerService (with External scope)
-	//   AlibabaCloud: LoadBalancerService (with External scope)
-	//   Libvirt:      HostNetwork
-	//
-	// Any other platform types (including None) default to HostNetwork.
-	//
-	// endpointPublishingStrategy cannot be updated.
-	//
-	// +optional
-	EndpointPublishingStrategy *EndpointPublishingStrategy `json:"endpointPublishingStrategy,omitempty"`
-
-	// defaultCertificate is a reference to a secret containing the default
-	// certificate served by the ingress controller. When Routes don't specify
-	// their own certificate, defaultCertificate is used.
-	//
-	// The secret must contain the following keys and data:
-	//
-	//   tls.crt: certificate file contents
-	//   tls.key: key file contents
-	//
-	// If unset, a wildcard certificate is automatically generated and used. The
-	// certificate is valid for the ingress controller domain (and subdomains) and
-	// the generated certificate's CA will be automatically integrated with the
-	// cluster's trust store.
-	//
-	// If a wildcard certificate is used and shared by multiple
-	// HTTP/2 enabled routes (which implies ALPN) then clients
-	// (i.e., notably browsers) are at liberty to reuse open
-	// connections. This means a client can reuse a connection to
-	// another route and that is likely to fail. This behaviour is
-	// generally known as connection coalescing.
-	//
-	// The in-use certificate (whether generated or user-specified) will be
-	// automatically integrated with OpenShift's built-in OAuth server.
-	//
-	// +optional
-	DefaultCertificate *corev1.LocalObjectReference `json:"defaultCertificate,omitempty"`
-
-	// namespaceSelector is used to filter the set of namespaces serviced by the
-	// ingress controller. This is useful for implementing shards.
-	//
-	// If unset, the default is no filtering.
- // - // +optional - NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"` - - // routeSelector is used to filter the set of Routes serviced by the ingress - // controller. This is useful for implementing shards. - // - // If unset, the default is no filtering. - // - // +optional - RouteSelector *metav1.LabelSelector `json:"routeSelector,omitempty"` - - // nodePlacement enables explicit control over the scheduling of the ingress - // controller. - // - // If unset, defaults are used. See NodePlacement for more details. - // - // +optional - NodePlacement *NodePlacement `json:"nodePlacement,omitempty"` - - // tlsSecurityProfile specifies settings for TLS connections for ingresscontrollers. - // - // If unset, the default is based on the apiservers.config.openshift.io/cluster resource. - // - // Note that when using the Old, Intermediate, and Modern profile types, the effective - // profile configuration is subject to change between releases. For example, given - // a specification to use the Intermediate profile deployed on release X.Y.Z, an upgrade - // to release X.Y.Z+1 may cause a new profile configuration to be applied to the ingress - // controller, resulting in a rollout. - // - // +optional - TLSSecurityProfile *configv1.TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` - - // clientTLS specifies settings for requesting and verifying client - // certificates, which can be used to enable mutual TLS for - // edge-terminated and reencrypt routes. - // - // +optional - ClientTLS ClientTLS `json:"clientTLS"` - - // routeAdmission defines a policy for handling new route claims (for example, - // to allow or deny claims across namespaces). - // - // If empty, defaults will be applied. See specific routeAdmission fields - // for details about their defaults. - // - // +optional - RouteAdmission *RouteAdmissionPolicy `json:"routeAdmission,omitempty"` - - // logging defines parameters for what should be logged where. If this - // field is empty, operational logs are enabled but access logs are - // disabled. - // - // +optional - Logging *IngressControllerLogging `json:"logging,omitempty"` - - // httpHeaders defines policy for HTTP headers. - // - // If this field is empty, the default values are used. - // - // +optional - HTTPHeaders *IngressControllerHTTPHeaders `json:"httpHeaders,omitempty"` - - // httpEmptyRequestsPolicy describes how HTTP connections should be - // handled if the connection times out before a request is received. - // Allowed values for this field are "Respond" and "Ignore". If the - // field is set to "Respond", the ingress controller sends an HTTP 400 - // or 408 response, logs the connection (if access logging is enabled), - // and counts the connection in the appropriate metrics. If the field - // is set to "Ignore", the ingress controller closes the connection - // without sending a response, logging the connection, or incrementing - // metrics. The default value is "Respond". - // - // Typically, these connections come from load balancers' health probes - // or Web browsers' speculative connections ("preconnect") and can be - // safely ignored. However, these requests may also be caused by - // network errors, and so setting this field to "Ignore" may impede - // detection and diagnosis of problems. In addition, these requests may - // be caused by port scans, in which case logging empty requests may aid - // in detecting intrusion attempts. 
- // - // +optional - // +kubebuilder:default:="Respond" - HTTPEmptyRequestsPolicy HTTPEmptyRequestsPolicy `json:"httpEmptyRequestsPolicy,omitempty"` - - // tuningOptions defines parameters for adjusting the performance of - // ingress controller pods. All fields are optional and will use their - // respective defaults if not set. See specific tuningOptions fields for - // more details. - // - // Setting fields within tuningOptions is generally not recommended. The - // default values are suitable for most configurations. - // - // +optional - TuningOptions IngressControllerTuningOptions `json:"tuningOptions,omitempty"` - - // unsupportedConfigOverrides allows specifying unsupported - // configuration options. Its use is unsupported. - // - // +optional - // +nullable - // +kubebuilder:pruning:PreserveUnknownFields - UnsupportedConfigOverrides runtime.RawExtension `json:"unsupportedConfigOverrides"` - - // httpCompression defines a policy for HTTP traffic compression. - // By default, there is no HTTP compression. - // - // +optional - HTTPCompression HTTPCompressionPolicy `json:"httpCompression,omitempty"` -} - -// httpCompressionPolicy turns on compression for the specified MIME types. -// -// This field is optional, and its absence implies that compression should not be enabled -// globally in HAProxy. -// -// If httpCompressionPolicy exists, compression should be enabled only for the specified -// MIME types. -type HTTPCompressionPolicy struct { - // mimeTypes is a list of MIME types that should have compression applied. - // This list can be empty, in which case the ingress controller does not apply compression. - // - // Note: Not all MIME types benefit from compression, but HAProxy will still use resources - // to try to compress if instructed to. Generally speaking, text (html, css, js, etc.) - // formats benefit from compression, but formats that are already compressed (image, - // audio, video, etc.) benefit little in exchange for the time and cpu spent on compressing - // again. See https://joehonton.medium.com/the-gzip-penalty-d31bd697f1a2 - // - // +listType=set - MimeTypes []CompressionMIMEType `json:"mimeTypes,omitempty"` -} - -// CompressionMIMEType defines the format of a single MIME type. -// E.g. "text/css; charset=utf-8", "text/html", "text/*", "image/svg+xml", -// "application/octet-stream", "X-custom/customsub", etc. -// -// The format should follow the Content-Type definition in RFC 1341: -// Content-Type := type "/" subtype *[";" parameter] -// - The type in Content-Type can be one of: -// application, audio, image, message, multipart, text, video, or a custom -// type preceded by "X-" and followed by a token as defined below. -// - The token is a string of at least one character, and not containing white -// space, control characters, or any of the characters in the tspecials set. -// - The tspecials set contains the characters ()<>@,;:\"/[]?.= -// - The subtype in Content-Type is also a token. -// - The optional parameter/s following the subtype are defined as: -// token "=" (token / quoted-string) -// - The quoted-string, as defined in RFC 822, is surrounded by double quotes -// and can contain white space plus any character EXCEPT \, ", and CR. -// It can also contain any single ASCII character as long as it is escaped by \. 
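// As an aside for illustration (not part of the deleted source): a minimal
// sketch of enabling compression for a few text formats. The chosen MIME
// types are only examples and must match the validation pattern below.
//
//	spec.HTTPCompression = HTTPCompressionPolicy{
//		MimeTypes: []CompressionMIMEType{"text/html", "text/css; charset=utf-8", "application/json"},
//	}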
-// -// +kubebuilder:validation:Pattern=`^(?i)(x-[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+|application|audio|image|message|multipart|text|video)/[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+(; *[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+=([^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+|"(\\[\x00-\x7F]|[^\x0D"\\])*"))*$` -type CompressionMIMEType string - -// NodePlacement describes node scheduling configuration for an ingress -// controller. -type NodePlacement struct { - // nodeSelector is the node selector applied to ingress controller - // deployments. - // - // If set, the specified selector is used and replaces the default. - // - // If unset, the default depends on the value of the defaultPlacement - // field in the cluster config.openshift.io/v1/ingresses status. - // - // When defaultPlacement is Workers, the default is: - // - // kubernetes.io/os: linux - // node-role.kubernetes.io/worker: '' - // - // When defaultPlacement is ControlPlane, the default is: - // - // kubernetes.io/os: linux - // node-role.kubernetes.io/master: '' - // - // These defaults are subject to change. - // - // +optional - NodeSelector *metav1.LabelSelector `json:"nodeSelector,omitempty"` - - // tolerations is a list of tolerations applied to ingress controller - // deployments. - // - // The default is an empty list. - // - // See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - // - // +optional - Tolerations []corev1.Toleration `json:"tolerations,omitempty"` -} - -// EndpointPublishingStrategyType is a way to publish ingress controller endpoints. -// +kubebuilder:validation:Enum=LoadBalancerService;HostNetwork;Private;NodePortService -type EndpointPublishingStrategyType string - -const ( - // LoadBalancerService publishes the ingress controller using a Kubernetes - // LoadBalancer Service. - LoadBalancerServiceStrategyType EndpointPublishingStrategyType = "LoadBalancerService" - - // HostNetwork publishes the ingress controller on node ports where the - // ingress controller is deployed. - HostNetworkStrategyType EndpointPublishingStrategyType = "HostNetwork" - - // Private does not publish the ingress controller. - PrivateStrategyType EndpointPublishingStrategyType = "Private" - - // NodePortService publishes the ingress controller using a Kubernetes NodePort Service. - NodePortServiceStrategyType EndpointPublishingStrategyType = "NodePortService" -) - -// LoadBalancerScope is the scope at which a load balancer is exposed. -// +kubebuilder:validation:Enum=Internal;External -type LoadBalancerScope string - -var ( - // InternalLoadBalancer is a load balancer that is exposed only on the - // cluster's private network. - InternalLoadBalancer LoadBalancerScope = "Internal" - - // ExternalLoadBalancer is a load balancer that is exposed on the - // cluster's public network (which is typically on the Internet). - ExternalLoadBalancer LoadBalancerScope = "External" -) - -// LoadBalancerStrategy holds parameters for a load balancer. -type LoadBalancerStrategy struct { - // scope indicates the scope at which the load balancer is exposed. - // Possible values are "External" and "Internal". - // - // +kubebuilder:validation:Required - // +required - Scope LoadBalancerScope `json:"scope"` - - // providerParameters holds desired load balancer information specific to - // the underlying infrastructure provider. - // - // If empty, defaults will be applied. See specific providerParameters - // fields for details about their defaults. 
- // - // +optional - ProviderParameters *ProviderLoadBalancerParameters `json:"providerParameters,omitempty"` -} - -// ProviderLoadBalancerParameters holds desired load balancer information -// specific to the underlying infrastructure provider. -// +union -type ProviderLoadBalancerParameters struct { - // type is the underlying infrastructure provider for the load balancer. - // Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Nutanix", - // "OpenStack", and "VSphere". - // - // +unionDiscriminator - // +kubebuilder:validation:Required - // +required - Type LoadBalancerProviderType `json:"type"` - - // aws provides configuration settings that are specific to AWS - // load balancers. - // - // If empty, defaults will be applied. See specific aws fields for - // details about their defaults. - // - // +optional - AWS *AWSLoadBalancerParameters `json:"aws,omitempty"` - - // gcp provides configuration settings that are specific to GCP - // load balancers. - // - // If empty, defaults will be applied. See specific gcp fields for - // details about their defaults. - // - // +optional - GCP *GCPLoadBalancerParameters `json:"gcp,omitempty"` -} - -// LoadBalancerProviderType is the underlying infrastructure provider for the -// load balancer. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Nutanix", -// "OpenStack", and "VSphere". -// -// +kubebuilder:validation:Enum=AWS;Azure;BareMetal;GCP;Nutanix;OpenStack;VSphere;IBM -type LoadBalancerProviderType string - -const ( - AWSLoadBalancerProvider LoadBalancerProviderType = "AWS" - AzureLoadBalancerProvider LoadBalancerProviderType = "Azure" - GCPLoadBalancerProvider LoadBalancerProviderType = "GCP" - OpenStackLoadBalancerProvider LoadBalancerProviderType = "OpenStack" - VSphereLoadBalancerProvider LoadBalancerProviderType = "VSphere" - IBMLoadBalancerProvider LoadBalancerProviderType = "IBM" - BareMetalLoadBalancerProvider LoadBalancerProviderType = "BareMetal" - AlibabaCloudLoadBalancerProvider LoadBalancerProviderType = "AlibabaCloud" - NutanixLoadBalancerProvider LoadBalancerProviderType = "Nutanix" -) - -// AWSLoadBalancerParameters provides configuration settings that are -// specific to AWS load balancers. -// +union -type AWSLoadBalancerParameters struct { - // type is the type of AWS load balancer to instantiate for an ingresscontroller. - // - // Valid values are: - // - // * "Classic": A Classic Load Balancer that makes routing decisions at either - // the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). See - // the following for additional details: - // - // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb - // - // * "NLB": A Network Load Balancer that makes routing decisions at the - // transport layer (TCP/SSL). See the following for additional details: - // - // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb - // - // +unionDiscriminator - // +kubebuilder:validation:Required - // +required - Type AWSLoadBalancerType `json:"type"` - - // classicLoadBalancerParameters holds configuration parameters for an AWS - // classic load balancer. Present only if type is Classic. - // - // +optional - ClassicLoadBalancerParameters *AWSClassicLoadBalancerParameters `json:"classicLoadBalancer,omitempty"` - - // networkLoadBalancerParameters holds configuration parameters for an AWS - // network load balancer. Present only if type is NLB. 
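// As an aside for illustration (not part of the deleted source): a minimal
// sketch of provider parameters selecting an AWS Network Load Balancer,
// using the AWSLoadBalancerType constants defined below.
//
//	providerParameters := &ProviderLoadBalancerParameters{
//		Type: AWSLoadBalancerProvider,
//		AWS: &AWSLoadBalancerParameters{
//			Type: AWSNetworkLoadBalancer,
//		},
//	}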
-	//
-	// +optional
-	NetworkLoadBalancerParameters *AWSNetworkLoadBalancerParameters `json:"networkLoadBalancer,omitempty"`
-}
-
-// AWSLoadBalancerType is the type of AWS load balancer to instantiate.
-// +kubebuilder:validation:Enum=Classic;NLB
-type AWSLoadBalancerType string
-
-const (
-	AWSClassicLoadBalancer AWSLoadBalancerType = "Classic"
-	AWSNetworkLoadBalancer AWSLoadBalancerType = "NLB"
-)
-
-// GCPLoadBalancerParameters provides configuration settings that are
-// specific to GCP load balancers.
-type GCPLoadBalancerParameters struct {
-	// clientAccess describes how client access is restricted for internal
-	// load balancers.
-	//
-	// Valid values are:
-	// * "Global": Specifying an internal load balancer with Global client access
-	//   allows clients from any region within the VPC to communicate with the load
-	//   balancer.
-	//
-	//   https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#global_access
-	//
-	// * "Local": Specifying an internal load balancer with Local client access
-	//   means only clients within the same region (and VPC) as the GCP load balancer
-	//   can communicate with the load balancer. Note that this is the default behavior.
-	//
-	//   https://cloud.google.com/load-balancing/docs/internal#client_access
-	//
-	// +optional
-	ClientAccess GCPClientAccess `json:"clientAccess,omitempty"`
-}
-
-// GCPClientAccess describes how client access is restricted for internal
-// load balancers.
-// +kubebuilder:validation:Enum=Global;Local
-type GCPClientAccess string
-
-const (
-	GCPGlobalAccess GCPClientAccess = "Global"
-	GCPLocalAccess  GCPClientAccess = "Local"
-)
-
-// AWSClassicLoadBalancerParameters holds configuration parameters for an
-// AWS Classic load balancer.
-type AWSClassicLoadBalancerParameters struct {
-	// connectionIdleTimeout specifies the maximum time period that a
-	// connection may be idle before the load balancer closes the
-	// connection. The value must be parseable as a time duration value;
-	// see <https://pkg.go.dev/time#ParseDuration>. A nil or zero value
-	// means no opinion, in which case a default value is used. The default
-	// value for this field is 60s. This default is subject to change.
-	//
-	// +kubebuilder:validation:Optional
-	// +kubebuilder:validation:Format=duration
-	// +optional
-	ConnectionIdleTimeout metav1.Duration `json:"connectionIdleTimeout,omitempty"`
-}
-
-// AWSNetworkLoadBalancerParameters holds configuration parameters for an
-// AWS Network load balancer.
-type AWSNetworkLoadBalancerParameters struct {
-}
-
-// HostNetworkStrategy holds parameters for the HostNetwork endpoint publishing
-// strategy.
-type HostNetworkStrategy struct {
-	// protocol specifies whether the IngressController expects incoming
-	// connections to use plain TCP or whether the IngressController expects
-	// PROXY protocol.
-	//
-	// PROXY protocol can be used with load balancers that support it to
-	// communicate the source addresses of client connections when
-	// forwarding those connections to the IngressController. Using PROXY
-	// protocol enables the IngressController to report those source
-	// addresses instead of reporting the load balancer's address in HTTP
-	// headers and logs. Note that enabling PROXY protocol on the
-	// IngressController will cause connections to fail if you are not using
-	// a load balancer that uses PROXY protocol to forward connections to
-	// the IngressController. See
-	// http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for
-	// information about PROXY protocol.
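// As an aside for illustration (not part of the deleted source): a minimal
// sketch of an endpoint publishing strategy that uses host networking with
// PROXY protocol and non-default ports. The port values are arbitrary
// examples, and EndpointPublishingStrategy is defined later in this file.
//
//	strategy := EndpointPublishingStrategy{
//		Type: HostNetworkStrategyType,
//		HostNetwork: &HostNetworkStrategy{
//			Protocol:  ProxyProtocol,
//			HTTPPort:  8080,
//			HTTPSPort: 8443,
//			StatsPort: 1936,
//		},
//	}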
- // - // The following values are valid for this field: - // - // * The empty string. - // * "TCP". - // * "PROXY". - // - // The empty string specifies the default, which is TCP without PROXY - // protocol. Note that the default is subject to change. - // - // +kubebuilder:validation:Optional - // +optional - Protocol IngressControllerProtocol `json:"protocol,omitempty"` - - // httpPort is the port on the host which should be used to listen for - // HTTP requests. This field should be set when port 80 is already in use. - // The value should not coincide with the NodePort range of the cluster. - // When the value is 0 or is not specified it defaults to 80. - // +kubebuilder:validation:Optional - // +kubebuilder:validation:Maximum=65535 - // +kubebuilder:validation:Minimum=0 - // +kubebuilder:default=80 - // +optional - HTTPPort int32 `json:"httpPort,omitempty"` - - // httpsPort is the port on the host which should be used to listen for - // HTTPS requests. This field should be set when port 443 is already in use. - // The value should not coincide with the NodePort range of the cluster. - // When the value is 0 or is not specified it defaults to 443. - // +kubebuilder:validation:Optional - // +kubebuilder:validation:Maximum=65535 - // +kubebuilder:validation:Minimum=0 - // +kubebuilder:default=443 - // +optional - HTTPSPort int32 `json:"httpsPort,omitempty"` - - // statsPort is the port on the host where the stats from the router are - // published. The value should not coincide with the NodePort range of the - // cluster. If an external load balancer is configured to forward connections - // to this IngressController, the load balancer should use this port for - // health checks. The load balancer can send HTTP probes on this port on a - // given node, with the path /healthz/ready to determine if the ingress - // controller is ready to receive traffic on the node. For proper operation - // the load balancer must not forward traffic to a node until the health - // check reports ready. The load balancer should also stop forwarding requests - // within a maximum of 45 seconds after /healthz/ready starts reporting - // not-ready. Probing every 5 to 10 seconds, with a 5-second timeout and with - // a threshold of two successful or failed requests to become healthy or - // unhealthy respectively, are well-tested values. When the value is 0 or - // is not specified it defaults to 1936. - // +kubebuilder:validation:Optional - // +kubebuilder:validation:Maximum=65535 - // +kubebuilder:validation:Minimum=0 - // +kubebuilder:default=1936 - // +optional - StatsPort int32 `json:"statsPort,omitempty"` -} - -// PrivateStrategy holds parameters for the Private endpoint publishing -// strategy. -type PrivateStrategy struct { -} - -// NodePortStrategy holds parameters for the NodePortService endpoint publishing strategy. -type NodePortStrategy struct { - // protocol specifies whether the IngressController expects incoming - // connections to use plain TCP or whether the IngressController expects - // PROXY protocol. - // - // PROXY protocol can be used with load balancers that support it to - // communicate the source addresses of client connections when - // forwarding those connections to the IngressController. Using PROXY - // protocol enables the IngressController to report those source - // addresses instead of reporting the load balancer's address in HTTP - // headers and logs. 
Note that enabling PROXY protocol on the
-	// IngressController will cause connections to fail if you are not using
-	// a load balancer that uses PROXY protocol to forward connections to
-	// the IngressController. See
-	// http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for
-	// information about PROXY protocol.
-	//
-	// The following values are valid for this field:
-	//
-	// * The empty string.
-	// * "TCP".
-	// * "PROXY".
-	//
-	// The empty string specifies the default, which is TCP without PROXY
-	// protocol. Note that the default is subject to change.
-	//
-	// +kubebuilder:validation:Optional
-	// +optional
-	Protocol IngressControllerProtocol `json:"protocol,omitempty"`
-}
-
-// IngressControllerProtocol specifies whether PROXY protocol is enabled or not.
-// +kubebuilder:validation:Enum="";TCP;PROXY
-type IngressControllerProtocol string
-
-const (
-	DefaultProtocol IngressControllerProtocol = ""
-	TCPProtocol     IngressControllerProtocol = "TCP"
-	ProxyProtocol   IngressControllerProtocol = "PROXY"
-)
-
-// EndpointPublishingStrategy is a way to publish the endpoints of an
-// IngressController, and represents the type and any additional configuration
-// for a specific type.
-// +union
-type EndpointPublishingStrategy struct {
-	// type is the publishing strategy to use. Valid values are:
-	//
-	// * LoadBalancerService
-	//
-	// Publishes the ingress controller using a Kubernetes LoadBalancer Service.
-	//
-	// In this configuration, the ingress controller deployment uses container
-	// networking. A LoadBalancer Service is created to publish the deployment.
-	//
-	// See: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer
-	//
-	// If domain is set, a wildcard DNS record will be managed to point at the
-	// LoadBalancer Service's external name. DNS records are managed only in DNS
-	// zones defined by dns.config.openshift.io/cluster .spec.publicZone and
-	// .spec.privateZone.
-	//
-	// Wildcard DNS management is currently supported only on the AWS, Azure,
-	// and GCP platforms.
-	//
-	// * HostNetwork
-	//
-	// Publishes the ingress controller on node ports where the ingress controller
-	// is deployed.
-	//
-	// In this configuration, the ingress controller deployment uses host
-	// networking, bound to node ports 80 and 443. The user is responsible for
-	// configuring an external load balancer to publish the ingress controller via
-	// the node ports.
-	//
-	// * Private
-	//
-	// Does not publish the ingress controller.
-	//
-	// In this configuration, the ingress controller deployment uses container
-	// networking, and is not explicitly published. The user must manually publish
-	// the ingress controller.
-	//
-	// * NodePortService
-	//
-	// Publishes the ingress controller using a Kubernetes NodePort Service.
-	//
-	// In this configuration, the ingress controller deployment uses container
-	// networking. A NodePort Service is created to publish the deployment. The
-	// specific node ports are dynamically allocated by OpenShift; however, to
-	// support static port allocations, user changes to the node port
-	// field of the managed NodePort Service will be preserved.
-	//
-	// +unionDiscriminator
-	// +kubebuilder:validation:Required
-	// +required
-	Type EndpointPublishingStrategyType `json:"type"`
-
-	// loadBalancer holds parameters for the load balancer. Present only if
-	// type is LoadBalancerService.
- // +optional - LoadBalancer *LoadBalancerStrategy `json:"loadBalancer,omitempty"` - - // hostNetwork holds parameters for the HostNetwork endpoint publishing - // strategy. Present only if type is HostNetwork. - // +optional - HostNetwork *HostNetworkStrategy `json:"hostNetwork,omitempty"` - - // private holds parameters for the Private endpoint publishing - // strategy. Present only if type is Private. - // +optional - Private *PrivateStrategy `json:"private,omitempty"` - - // nodePort holds parameters for the NodePortService endpoint publishing strategy. - // Present only if type is NodePortService. - // +optional - NodePort *NodePortStrategy `json:"nodePort,omitempty"` -} - -// ClientCertificatePolicy describes the policy for client certificates. -// +kubebuilder:validation:Enum="";Required;Optional -type ClientCertificatePolicy string - -const ( - // ClientCertificatePolicyRequired indicates that a client certificate - // should be required. - ClientCertificatePolicyRequired ClientCertificatePolicy = "Required" - - // ClientCertificatePolicyOptional indicates that a client certificate - // should be requested but not required. - ClientCertificatePolicyOptional ClientCertificatePolicy = "Optional" -) - -// ClientTLS specifies TLS configuration to enable client-to-server -// authentication, which can be used for mutual TLS. -type ClientTLS struct { - // clientCertificatePolicy specifies whether the ingress controller - // requires clients to provide certificates. This field accepts the - // values "Required" or "Optional". - // - // Note that the ingress controller only checks client certificates for - // edge-terminated and reencrypt TLS routes; it cannot check - // certificates for cleartext HTTP or passthrough TLS routes. - // - // +kubebuilder:validation:Required - // +required - ClientCertificatePolicy ClientCertificatePolicy `json:"clientCertificatePolicy"` - - // clientCA specifies a configmap containing the PEM-encoded CA - // certificate bundle that should be used to verify a client's - // certificate. The administrator must create this configmap in the - // openshift-config namespace. - // - // +kubebuilder:validation:Required - // +required - ClientCA configv1.ConfigMapNameReference `json:"clientCA"` - - // allowedSubjectPatterns specifies a list of regular expressions that - // should be matched against the distinguished name on a valid client - // certificate to filter requests. The regular expressions must use - // PCRE syntax. If this list is empty, no filtering is performed. If - // the list is nonempty, then at least one pattern must match a client - // certificate's distinguished name or else the ingress controller - // rejects the certificate and denies the connection. - // - // +listType=atomic - // +optional - AllowedSubjectPatterns []string `json:"allowedSubjectPatterns,omitempty"` -} - -// RouteAdmissionPolicy is an admission policy for allowing new route claims. -type RouteAdmissionPolicy struct { - // namespaceOwnership describes how host name claims across namespaces should - // be handled. - // - // Value must be one of: - // - // - Strict: Do not allow routes in different namespaces to claim the same host. - // - // - InterNamespaceAllowed: Allow routes to claim different paths of the same - // host name across namespaces. - // - // If empty, the default is Strict. 
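// As an aside for illustration (not part of the deleted source): a minimal
// sketch of an admission policy that lets routes in different namespaces
// claim paths of a shared host while keeping wildcards disallowed, using the
// constants defined below.
//
//	routeAdmission := &RouteAdmissionPolicy{
//		NamespaceOwnership: InterNamespaceAllowedOwnershipCheck,
//		WildcardPolicy:     WildcardPolicyDisallowed,
//	}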
- // +optional - NamespaceOwnership NamespaceOwnershipCheck `json:"namespaceOwnership,omitempty"` - // wildcardPolicy describes how routes with wildcard policies should - // be handled for the ingress controller. WildcardPolicy controls use - // of routes [1] exposed by the ingress controller based on the route's - // wildcard policy. - // - // [1] https://github.com/openshift/api/blob/master/route/v1/types.go - // - // Note: Updating WildcardPolicy from WildcardsAllowed to WildcardsDisallowed - // will cause admitted routes with a wildcard policy of Subdomain to stop - // working. These routes must be updated to a wildcard policy of None to be - // readmitted by the ingress controller. - // - // WildcardPolicy supports WildcardsAllowed and WildcardsDisallowed values. - // - // If empty, defaults to "WildcardsDisallowed". - // - WildcardPolicy WildcardPolicy `json:"wildcardPolicy,omitempty"` -} - -// WildcardPolicy is a route admission policy component that describes how -// routes with a wildcard policy should be handled. -// +kubebuilder:validation:Enum=WildcardsAllowed;WildcardsDisallowed -type WildcardPolicy string - -const ( - // WildcardPolicyAllowed indicates routes with any wildcard policy are - // admitted by the ingress controller. - WildcardPolicyAllowed WildcardPolicy = "WildcardsAllowed" - - // WildcardPolicyDisallowed indicates only routes with a wildcard policy - // of None are admitted by the ingress controller. - WildcardPolicyDisallowed WildcardPolicy = "WildcardsDisallowed" -) - -// NamespaceOwnershipCheck is a route admission policy component that describes -// how host name claims across namespaces should be handled. -// +kubebuilder:validation:Enum=InterNamespaceAllowed;Strict -type NamespaceOwnershipCheck string - -const ( - // InterNamespaceAllowedOwnershipCheck allows routes to claim different paths of the same host name across namespaces. - InterNamespaceAllowedOwnershipCheck NamespaceOwnershipCheck = "InterNamespaceAllowed" - - // StrictNamespaceOwnershipCheck does not allow routes to claim the same host name across namespaces. - StrictNamespaceOwnershipCheck NamespaceOwnershipCheck = "Strict" -) - -// LoggingDestinationType is a type of destination to which to send log -// messages. -// -// +kubebuilder:validation:Enum=Container;Syslog -type LoggingDestinationType string - -const ( - // Container sends log messages to a sidecar container. - ContainerLoggingDestinationType LoggingDestinationType = "Container" - - // Syslog sends log messages to a syslog endpoint. - SyslogLoggingDestinationType LoggingDestinationType = "Syslog" - - // ContainerLoggingSidecarContainerName is the name of the container - // with the log output in an ingress controller pod when container - // logging is used. - ContainerLoggingSidecarContainerName = "logs" -) - -// SyslogLoggingDestinationParameters describes parameters for the Syslog -// logging destination type. -type SyslogLoggingDestinationParameters struct { - // address is the IP address of the syslog endpoint that receives log - // messages. - // - // +kubebuilder:validation:Required - // +required - Address string `json:"address"` - - // port is the UDP port number of the syslog endpoint that receives log - // messages. - // - // +kubebuilder:validation:Required - // +kubebuilder:validation:Minimum=1 - // +kubebuilder:validation:Maximum=65535 - // +required - Port uint32 `json:"port"` - - // facility specifies the syslog facility of log messages. - // - // If this field is empty, the facility is "local1". 
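// As an aside for illustration (not part of the deleted source): a minimal
// sketch of a syslog logging destination. The address is a placeholder, and
// the port and facility values follow the constraints documented on the
// fields; LoggingDestination is defined later in this file.
//
//	destination := LoggingDestination{
//		Type: SyslogLoggingDestinationType,
//		Syslog: &SyslogLoggingDestinationParameters{
//			Address:  "10.0.0.5",
//			Port:     514,
//			Facility: "local1",
//		},
//	}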
- // - // +kubebuilder:validation:Optional - // +kubebuilder:validation:Enum=kern;user;mail;daemon;auth;syslog;lpr;news;uucp;cron;auth2;ftp;ntp;audit;alert;cron2;local0;local1;local2;local3;local4;local5;local6;local7 - // +optional - Facility string `json:"facility,omitempty"` - - // maxLength is the maximum length of the syslog message - // - // If this field is empty, the maxLength is set to "1024". - // - // +kubebuilder:validation:Optional - // +kubebuilder:validation:Maximum=4096 - // +kubebuilder:validation:Minimum=480 - // +kubebuilder:default=1024 - // +optional - MaxLength uint32 `json:"maxLength,omitempty"` -} - -// ContainerLoggingDestinationParameters describes parameters for the Container -// logging destination type. -type ContainerLoggingDestinationParameters struct { -} - -// LoggingDestination describes a destination for log messages. -// +union -type LoggingDestination struct { - // type is the type of destination for logs. It must be one of the - // following: - // - // * Container - // - // The ingress operator configures the sidecar container named "logs" on - // the ingress controller pod and configures the ingress controller to - // write logs to the sidecar. The logs are then available as container - // logs. The expectation is that the administrator configures a custom - // logging solution that reads logs from this sidecar. Note that using - // container logs means that logs may be dropped if the rate of logs - // exceeds the container runtime's or the custom logging solution's - // capacity. - // - // * Syslog - // - // Logs are sent to a syslog endpoint. The administrator must specify - // an endpoint that can receive syslog messages. The expectation is - // that the administrator has configured a custom syslog instance. - // - // +unionDiscriminator - // +kubebuilder:validation:Required - // +required - Type LoggingDestinationType `json:"type"` - - // syslog holds parameters for a syslog endpoint. Present only if - // type is Syslog. - // - // +optional - Syslog *SyslogLoggingDestinationParameters `json:"syslog,omitempty"` - - // container holds parameters for the Container logging destination. - // Present only if type is Container. - // - // +optional - Container *ContainerLoggingDestinationParameters `json:"container,omitempty"` -} - -// IngressControllerCaptureHTTPHeader describes an HTTP header that should be -// captured. -type IngressControllerCaptureHTTPHeader struct { - // name specifies a header name. Its value must be a valid HTTP header - // name as defined in RFC 2616 section 4.2. - // - // +kubebuilder:validation:Required - // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" - // +required - Name string `json:"name"` - - // maxLength specifies a maximum length for the header value. If a - // header value exceeds this length, the value will be truncated in the - // log message. Note that the ingress controller may impose a separate - // bound on the total length of HTTP headers in a request. - // - // +kubebuilder:validation:Required - // +kubebuilder:validation:Minimum=1 - // +required - MaxLength int `json:"maxLength"` -} - -// IngressControllerCaptureHTTPHeaders specifies which HTTP headers the -// IngressController captures. -type IngressControllerCaptureHTTPHeaders struct { - // request specifies which HTTP request headers to capture. - // - // If this field is empty, no request headers are captured. 
- // - // +nullable - // +optional - Request []IngressControllerCaptureHTTPHeader `json:"request,omitempty"` - - // response specifies which HTTP response headers to capture. - // - // If this field is empty, no response headers are captured. - // - // +nullable - // +optional - Response []IngressControllerCaptureHTTPHeader `json:"response,omitempty"` -} - -// CookieMatchType indicates the type of matching used against cookie names to -// select a cookie for capture. -// +kubebuilder:validation:Enum=Exact;Prefix -type CookieMatchType string - -const ( - // CookieMatchTypeExact indicates that an exact string match should be - // performed. - CookieMatchTypeExact CookieMatchType = "Exact" - // CookieMatchTypePrefix indicates that a string prefix match should be - // performed. - CookieMatchTypePrefix CookieMatchType = "Prefix" -) - -// IngressControllerCaptureHTTPCookie describes an HTTP cookie that should be -// captured. -type IngressControllerCaptureHTTPCookie struct { - IngressControllerCaptureHTTPCookieUnion `json:",inline"` - - // maxLength specifies a maximum length of the string that will be - // logged, which includes the cookie name, cookie value, and - // one-character delimiter. If the log entry exceeds this length, the - // value will be truncated in the log message. Note that the ingress - // controller may impose a separate bound on the total length of HTTP - // headers in a request. - // - // +kubebuilder:validation:Required - // +kubebuilder:validation:Minimum=1 - // +kubebuilder:validation:Maximum=1024 - // +required - MaxLength int `json:"maxLength"` -} - -// IngressControllerCaptureHTTPCookieUnion describes optional fields of an HTTP cookie that should be captured. -// +union -type IngressControllerCaptureHTTPCookieUnion struct { - // matchType specifies the type of match to be performed on the cookie - // name. Allowed values are "Exact" for an exact string match and - // "Prefix" for a string prefix match. If "Exact" is specified, a name - // must be specified in the name field. If "Prefix" is provided, a - // prefix must be specified in the namePrefix field. For example, - // specifying matchType "Prefix" and namePrefix "foo" will capture a - // cookie named "foo" or "foobar" but not one named "bar". The first - // matching cookie is captured. - // - // +unionDiscriminator - // +kubebuilder:validation:Required - // +required - MatchType CookieMatchType `json:"matchType,omitempty"` - - // name specifies a cookie name. Its value must be a valid HTTP cookie - // name as defined in RFC 6265 section 4.1. - // - // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]*$" - // +kubebuilder:validation:MinLength=0 - // +kubebuilder:validation:MaxLength=1024 - // +optional - Name string `json:"name"` - - // namePrefix specifies a cookie name prefix. Its value must be a valid - // HTTP cookie name as defined in RFC 6265 section 4.1. - // - // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]*$" - // +kubebuilder:validation:MinLength=0 - // +kubebuilder:validation:MaxLength=1024 - // +optional - NamePrefix string `json:"namePrefix"` -} - -// LoggingPolicy indicates how an event should be logged. -// +kubebuilder:validation:Enum=Log;Ignore -type LoggingPolicy string - -const ( - // LoggingPolicyLog indicates that an event should be logged. - LoggingPolicyLog LoggingPolicy = "Log" - // LoggingPolicyIgnore indicates that an event should not be logged. 
- LoggingPolicyIgnore LoggingPolicy = "Ignore" -) - -// AccessLogging describes how client requests should be logged. -type AccessLogging struct { - // destination is where access logs go. - // - // +kubebuilder:validation:Required - // +required - Destination LoggingDestination `json:"destination"` - - // httpLogFormat specifies the format of the log message for an HTTP - // request. - // - // If this field is empty, log messages use the implementation's default - // HTTP log format. For HAProxy's default HTTP log format, see the - // HAProxy documentation: - // http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3 - // - // Note that this format only applies to cleartext HTTP connections - // and to secure HTTP connections for which the ingress controller - // terminates encryption (that is, edge-terminated or reencrypt - // connections). It does not affect the log format for TLS passthrough - // connections. - // - // +optional - HttpLogFormat string `json:"httpLogFormat,omitempty"` - - // httpCaptureHeaders defines HTTP headers that should be captured in - // access logs. If this field is empty, no headers are captured. - // - // Note that this option only applies to cleartext HTTP connections - // and to secure HTTP connections for which the ingress controller - // terminates encryption (that is, edge-terminated or reencrypt - // connections). Headers cannot be captured for TLS passthrough - // connections. - // - // +optional - HTTPCaptureHeaders IngressControllerCaptureHTTPHeaders `json:"httpCaptureHeaders,omitempty"` - - // httpCaptureCookies specifies HTTP cookies that should be captured in - // access logs. If this field is empty, no cookies are captured. - // - // +nullable - // +optional - // +kubebuilder:validation:MaxItems=1 - HTTPCaptureCookies []IngressControllerCaptureHTTPCookie `json:"httpCaptureCookies,omitempty"` - - // logEmptyRequests specifies how connections on which no request is - // received should be logged. Typically, these empty requests come from - // load balancers' health probes or Web browsers' speculative - // connections ("preconnect"), in which case logging these requests may - // be undesirable. However, these requests may also be caused by - // network errors, in which case logging empty requests may be useful - // for diagnosing the errors. In addition, these requests may be caused - // by port scans, in which case logging empty requests may aid in - // detecting intrusion attempts. Allowed values for this field are - // "Log" and "Ignore". The default value is "Log". - // - // +optional - // +kubebuilder:default:="Log" - LogEmptyRequests LoggingPolicy `json:"logEmptyRequests,omitempty"` -} - -// IngressControllerLogging describes what should be logged where. -type IngressControllerLogging struct { - // access describes how the client requests should be logged. - // - // If this field is empty, access logging is disabled. - // - // +optional - Access *AccessLogging `json:"access,omitempty"` -} - -// IngressControllerHTTPHeaderPolicy is a policy for setting HTTP headers. -// -// +kubebuilder:validation:Enum=Append;Replace;IfNone;Never -type IngressControllerHTTPHeaderPolicy string - -const ( - // AppendHTTPHeaderPolicy appends the header, preserving any existing header. - AppendHTTPHeaderPolicy IngressControllerHTTPHeaderPolicy = "Append" - // ReplaceHTTPHeaderPolicy sets the header, removing any existing header. 
- ReplaceHTTPHeaderPolicy IngressControllerHTTPHeaderPolicy = "Replace" - // IfNoneHTTPHeaderPolicy sets the header if it is not already set. - IfNoneHTTPHeaderPolicy IngressControllerHTTPHeaderPolicy = "IfNone" - // NeverHTTPHeaderPolicy never sets the header, preserving any existing - // header. - NeverHTTPHeaderPolicy IngressControllerHTTPHeaderPolicy = "Never" -) - -// IngressControllerHTTPUniqueIdHeaderPolicy describes configuration for a -// unique id header. -type IngressControllerHTTPUniqueIdHeaderPolicy struct { - // name specifies the name of the HTTP header (for example, "unique-id") - // that the ingress controller should inject into HTTP requests. The - // field's value must be a valid HTTP header name as defined in RFC 2616 - // section 4.2. If the field is empty, no header is injected. - // - // +optional - // +kubebuilder:validation:Pattern="^$|^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" - // +kubebuilder:validation:MinLength=0 - // +kubebuilder:validation:MaxLength=1024 - Name string `json:"name,omitempty"` - - // format specifies the format for the injected HTTP header's value. - // This field has no effect unless name is specified. For the - // HAProxy-based ingress controller implementation, this format uses the - // same syntax as the HTTP log format. If the field is empty, the - // default value is "%{+X}o\\ %ci:%cp_%fi:%fp_%Ts_%rt:%pid"; see the - // corresponding HAProxy documentation: - // http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3 - // - // +optional - // +kubebuilder:validation:Pattern="^(%(%|(\\{[-+]?[QXE](,[-+]?[QXE])*\\})?([A-Za-z]+|\\[[.0-9A-Z_a-z]+(\\([^)]+\\))?(,[.0-9A-Z_a-z]+(\\([^)]+\\))?)*\\]))|[^%[:cntrl:]])*$" - // +kubebuilder:validation:MinLength=0 - // +kubebuilder:validation:MaxLength=1024 - Format string `json:"format,omitempty"` -} - -// IngressControllerHTTPHeaderNameCaseAdjustment is the name of an HTTP header -// (for example, "X-Forwarded-For") in the desired capitalization. The value -// must be a valid HTTP header name as defined in RFC 2616 section 4.2. -// -// +optional -// +kubebuilder:validation:Pattern="^$|^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" -// +kubebuilder:validation:MinLength=0 -// +kubebuilder:validation:MaxLength=1024 -type IngressControllerHTTPHeaderNameCaseAdjustment string - -// IngressControllerHTTPHeaders specifies how the IngressController handles -// certain HTTP headers. -type IngressControllerHTTPHeaders struct { - // forwardedHeaderPolicy specifies when and how the IngressController - // sets the Forwarded, X-Forwarded-For, X-Forwarded-Host, - // X-Forwarded-Port, X-Forwarded-Proto, and X-Forwarded-Proto-Version - // HTTP headers. The value may be one of the following: - // - // * "Append", which specifies that the IngressController appends the - // headers, preserving existing headers. - // - // * "Replace", which specifies that the IngressController sets the - // headers, replacing any existing Forwarded or X-Forwarded-* headers. - // - // * "IfNone", which specifies that the IngressController sets the - // headers if they are not already set. - // - // * "Never", which specifies that the IngressController never sets the - // headers, preserving any existing headers. - // - // By default, the policy is "Append". - // - // +optional - ForwardedHeaderPolicy IngressControllerHTTPHeaderPolicy `json:"forwardedHeaderPolicy,omitempty"` - - // uniqueId describes configuration for a custom HTTP header that the - // ingress controller should inject into incoming HTTP requests. 
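As an illustrative aside (not taken from this diff), a minimal sketch of the header-policy and unique-id types above; the enclosing IngressControllerHTTPHeaders struct continues just below:

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	// Inject a per-request "x-request-id" header and only set the
	// Forwarded/X-Forwarded-* headers when the client did not send them.
	headers := operatorv1.IngressControllerHTTPHeaders{
		ForwardedHeaderPolicy: operatorv1.IfNoneHTTPHeaderPolicy,
		UniqueId: operatorv1.IngressControllerHTTPUniqueIdHeaderPolicy{
			Name: "x-request-id",
			// Format left empty: the documented HAProxy default applies.
		},
	}
	fmt.Printf("%+v\n", headers)
}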
- // Typically, this header is configured to have a value that is unique - // to the HTTP request. The header can be used by applications or - // included in access logs to facilitate tracing individual HTTP - // requests. - // - // If this field is empty, no such header is injected into requests. - // - // +optional - UniqueId IngressControllerHTTPUniqueIdHeaderPolicy `json:"uniqueId,omitempty"` - - // headerNameCaseAdjustments specifies case adjustments that can be - // applied to HTTP header names. Each adjustment is specified as an - // HTTP header name with the desired capitalization. For example, - // specifying "X-Forwarded-For" indicates that the "x-forwarded-for" - // HTTP header should be adjusted to have the specified capitalization. - // - // These adjustments are only applied to cleartext, edge-terminated, and - // re-encrypt routes, and only when using HTTP/1. - // - // For request headers, these adjustments are applied only for routes - // that have the haproxy.router.openshift.io/h1-adjust-case=true - // annotation. For response headers, these adjustments are applied to - // all HTTP responses. - // - // If this field is empty, no request headers are adjusted. - // - // +nullable - // +optional - HeaderNameCaseAdjustments []IngressControllerHTTPHeaderNameCaseAdjustment `json:"headerNameCaseAdjustments,omitempty"` -} - -// IngressControllerTuningOptions specifies options for tuning the performance -// of ingress controller pods -type IngressControllerTuningOptions struct { - // headerBufferBytes describes how much memory should be reserved - // (in bytes) for IngressController connection sessions. - // Note that this value must be at least 16384 if HTTP/2 is - // enabled for the IngressController (https://tools.ietf.org/html/rfc7540). - // If this field is empty, the IngressController will use a default value - // of 32768 bytes. - // - // Setting this field is generally not recommended as headerBufferBytes - // values that are too small may break the IngressController and - // headerBufferBytes values that are too large could cause the - // IngressController to use significantly more memory than necessary. - // - // +kubebuilder:validation:Optional - // +kubebuilder:validation:Minimum=16384 - // +optional - HeaderBufferBytes int32 `json:"headerBufferBytes,omitempty"` - - // headerBufferMaxRewriteBytes describes how much memory should be reserved - // (in bytes) from headerBufferBytes for HTTP header rewriting - // and appending for IngressController connection sessions. - // Note that incoming HTTP requests will be limited to - // (headerBufferBytes - headerBufferMaxRewriteBytes) bytes, meaning - // headerBufferBytes must be greater than headerBufferMaxRewriteBytes. - // If this field is empty, the IngressController will use a default value - // of 8192 bytes. - // - // Setting this field is generally not recommended as - // headerBufferMaxRewriteBytes values that are too small may break the - // IngressController and headerBufferMaxRewriteBytes values that are too - // large could cause the IngressController to use significantly more memory - // than necessary. - // - // +kubebuilder:validation:Optional - // +kubebuilder:validation:Minimum=4096 - // +optional - HeaderBufferMaxRewriteBytes int32 `json:"headerBufferMaxRewriteBytes,omitempty"` - - // threadCount defines the number of threads created per HAProxy process. - // Creating more threads allows each ingress controller pod to handle more - // connections, at the cost of more system resources being used. 
HAProxy - // currently supports up to 64 threads. If this field is empty, the - // IngressController will use the default value. The current default is 4 - // threads, but this may change in future releases. - // - // Setting this field is generally not recommended. Increasing the number - // of HAProxy threads allows ingress controller pods to utilize more CPU - // time under load, potentially starving other pods if set too high. - // Reducing the number of threads may cause the ingress controller to - // perform poorly. - // - // +kubebuilder:validation:Optional - // +kubebuilder:validation:Minimum=1 - // +kubebuilder:validation:Maximum=64 - // +optional - ThreadCount int32 `json:"threadCount,omitempty"` - - // clientTimeout defines how long a connection will be held open while - // waiting for a client response. - // - // If unset, the default timeout is 30s - // +kubebuilder:validation:Optional - // +kubebuilder:validation:Format=duration - // +optional - ClientTimeout *metav1.Duration `json:"clientTimeout,omitempty"` - - // clientFinTimeout defines how long a connection will be held open while - // waiting for the client response to the server/backend closing the - // connection. - // - // If unset, the default timeout is 1s - // +kubebuilder:validation:Optional - // +kubebuilder:validation:Format=duration - // +optional - ClientFinTimeout *metav1.Duration `json:"clientFinTimeout,omitempty"` - - // serverTimeout defines how long a connection will be held open while - // waiting for a server/backend response. - // - // If unset, the default timeout is 30s - // +kubebuilder:validation:Optional - // +kubebuilder:validation:Format=duration - // +optional - ServerTimeout *metav1.Duration `json:"serverTimeout,omitempty"` - - // serverFinTimeout defines how long a connection will be held open while - // waiting for the server/backend response to the client closing the - // connection. - // - // If unset, the default timeout is 1s - // +kubebuilder:validation:Optional - // +kubebuilder:validation:Format=duration - // +optional - ServerFinTimeout *metav1.Duration `json:"serverFinTimeout,omitempty"` - - // tunnelTimeout defines how long a tunnel connection (including - // websockets) will be held open while the tunnel is idle. - // - // If unset, the default timeout is 1h - // +kubebuilder:validation:Optional - // +kubebuilder:validation:Format=duration - // +optional - TunnelTimeout *metav1.Duration `json:"tunnelTimeout,omitempty"` - - // tlsInspectDelay defines how long the router can hold data to find a - // matching route. - // - // Setting this too short can cause the router to fall back to the default - // certificate for edge-terminated or reencrypt routes even when a better - // matching certificate could be used. - // - // If unset, the default inspect delay is 5s - // +kubebuilder:validation:Optional - // +kubebuilder:validation:Format=duration - // +optional - TLSInspectDelay *metav1.Duration `json:"tlsInspectDelay,omitempty"` - - // healthCheckInterval defines how long the router waits between two consecutive - // health checks on its configured backends. This value is applied globally as - // a default for all routes, but may be overridden per-route by the route annotation - // "router.openshift.io/haproxy.health.check.interval". - // - // Expects an unsigned duration string of decimal numbers, each with optional - // fraction and a unit suffix, eg "300ms", "1.5h" or "2h45m". - // Valid time units are "ns", "us" (or "µs" U+00B5 or "μs" U+03BC), "ms", "s", "m", "h". 
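A minimal sketch, assuming the same vendored import, of overriding the timeout and thread-count defaults documented above (30s client/server timeouts, 4 threads); values here are illustrative:

package main

import (
	"fmt"
	"time"

	operatorv1 "github.com/openshift/api/operator/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Raise thread count and connection timeouts above the documented defaults.
	tuning := operatorv1.IngressControllerTuningOptions{
		ThreadCount:   8, // within the documented 1..64 bounds
		ClientTimeout: &metav1.Duration{Duration: 45 * time.Second},
		ServerTimeout: &metav1.Duration{Duration: 60 * time.Second},
		TunnelTimeout: &metav1.Duration{Duration: 2 * time.Hour},
	}
	fmt.Printf("%+v\n", tuning)
}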
- //
- // Setting this to less than 5s can cause excess traffic due to too frequent
- // TCP health checks and accompanying SYN packet storms. Alternatively, setting
- // this too high can result in increased latency due to backend servers that are no
- // longer available but haven't yet been detected as such.
- //
- // An empty or zero healthCheckInterval means no opinion and the IngressController chooses
- // a default, which is subject to change over time.
- // Currently the default healthCheckInterval value is 5s.
- //
- // Currently the minimum allowed value is 1s and the maximum allowed value is
- // 2147483647ms (24.85 days). Both are subject to change over time.
- //
- // +kubebuilder:validation:Optional
- // +kubebuilder:validation:Pattern=^0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+$
- // +kubebuilder:validation:Type:=string
- // +optional
- HealthCheckInterval *metav1.Duration `json:"healthCheckInterval,omitempty"`
-
- // maxConnections defines the maximum number of simultaneous
- // connections that can be established per HAProxy process.
- // Increasing this value allows each ingress controller pod to
- // handle more connections but at the cost of additional
- // system resources being consumed.
- //
- // Permitted values are: empty, 0, -1, and the range
- // 2000-2000000.
- //
- // If this field is empty or 0, the IngressController will use
- // the default value of 20000, but the default is subject to
- // change in future releases.
- //
- // If the value is -1 then HAProxy will dynamically compute a
- // maximum value based on the available ulimits in the running
- // container. Selecting -1 (i.e., auto) will result in a large
- // value being computed (~520000 on OpenShift >=4.10 clusters)
- // and therefore each HAProxy process will incur significant
- // memory usage compared to the current default of 20000.
- //
- // Setting a value that is greater than the current operating
- // system limit will prevent the HAProxy process from
- // starting.
- //
- // If you choose a discrete value (e.g., 750000) and the
- // router pod is migrated to a new node, there's no guarantee
- // that the new node has identical ulimits configured. In
- // such a scenario the pod would fail to start. If you have
- // nodes with different ulimits configured (e.g., different
- // tuned profiles) and you choose a discrete value then the
- // guidance is to use -1 and let the value be computed
- // dynamically at runtime.
- //
- // You can monitor memory usage for router containers with the
- // following metric:
- // 'container_memory_working_set_bytes{container="router",namespace="openshift-ingress"}'.
- //
- // You can monitor memory usage of individual HAProxy
- // processes in router containers with the following metric:
- // 'container_memory_working_set_bytes{container="router",namespace="openshift-ingress"}/container_processes{container="router",namespace="openshift-ingress"}'.
- //
- // +kubebuilder:validation:Optional
- // +optional
- MaxConnections int32 `json:"maxConnections,omitempty"`
-}
-
-// HTTPEmptyRequestsPolicy indicates how HTTP connections for which no request
-// is received should be handled.
-// +kubebuilder:validation:Enum=Respond;Ignore
-type HTTPEmptyRequestsPolicy string
-
-const (
- // HTTPEmptyRequestsPolicyRespond indicates that the ingress controller
- // should respond to empty requests.
- HTTPEmptyRequestsPolicyRespond HTTPEmptyRequestsPolicy = "Respond"
- // HTTPEmptyRequestsPolicyIgnore indicates that the ingress controller
- // should ignore empty requests.
- HTTPEmptyRequestsPolicyIgnore HTTPEmptyRequestsPolicy = "Ignore"
-)
-
-var (
- // Available indicates the ingress controller deployment is available.
- IngressControllerAvailableConditionType = "Available"
- // LoadBalancerManaged indicates the management status of any load balancer
- // service associated with an ingress controller.
- LoadBalancerManagedIngressConditionType = "LoadBalancerManaged"
- // LoadBalancerReady indicates the ready state of any load balancer service
- // associated with an ingress controller.
- LoadBalancerReadyIngressConditionType = "LoadBalancerReady"
- // DNSManaged indicates the management status of any DNS records for the
- // ingress controller.
- DNSManagedIngressConditionType = "DNSManaged"
- // DNSReady indicates the ready state of any DNS records for the ingress
- // controller.
- DNSReadyIngressConditionType = "DNSReady"
-)
-
-// IngressControllerStatus defines the observed status of the IngressController.
-type IngressControllerStatus struct {
- // availableReplicas is the number of observed available replicas according to the
- // ingress controller deployment.
- AvailableReplicas int32 `json:"availableReplicas"`
-
- // selector is a label selector, in string format, for ingress controller pods
- // corresponding to the IngressController. The number of matching pods should
- // equal the value of availableReplicas.
- Selector string `json:"selector"`
-
- // domain is the actual domain in use.
- Domain string `json:"domain"`
-
- // endpointPublishingStrategy is the actual strategy in use.
- EndpointPublishingStrategy *EndpointPublishingStrategy `json:"endpointPublishingStrategy,omitempty"`
-
- // conditions is a list of conditions and their status.
- //
- // Available means the ingress controller deployment is available and
- // servicing route and ingress resources (i.e., .status.availableReplicas
- // equals .spec.replicas)
- //
- // There are additional conditions which indicate the status of other
- // ingress controller features and capabilities.
- //
- // * LoadBalancerManaged
- // - True if the following conditions are met:
- // * The endpoint publishing strategy requires a service load balancer.
- // - False if any of those conditions are unsatisfied.
- //
- // * LoadBalancerReady
- // - True if the following conditions are met:
- // * A load balancer is managed.
- // * The load balancer is ready.
- // - False if any of those conditions are unsatisfied.
- //
- // * DNSManaged
- // - True if the following conditions are met:
- // * The endpoint publishing strategy and platform support DNS.
- // * The ingress controller domain is set.
- // * dns.config.openshift.io/cluster configures DNS zones.
- // - False if any of those conditions are unsatisfied.
- //
- // * DNSReady
- // - True if the following conditions are met:
- // * DNS is managed.
- // * DNS records have been successfully created.
- // - False if any of those conditions are unsatisfied.
- Conditions []OperatorCondition `json:"conditions,omitempty"`
-
- // tlsProfile is the TLS connection configuration that is in effect.
- // +optional
- TLSProfile *configv1.TLSProfileSpec `json:"tlsProfile,omitempty"`
-
- // observedGeneration is the most recent generation observed.
- // +optional
- ObservedGeneration int64 `json:"observedGeneration,omitempty"`
-
- // namespaceSelector is the actual namespaceSelector in use.
- // +optional
- NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"`
-
- // routeSelector is the actual routeSelector in use.
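A hedged sketch of consuming the condition list documented above. It assumes the OperatorCondition type (defined elsewhere in this package) exposes Type and Status fields, as is conventional for condition types:

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

// isAvailable reports whether an IngressControllerStatus carries the
// Available condition with status True.
func isAvailable(status operatorv1.IngressControllerStatus) bool {
	for _, c := range status.Conditions {
		if c.Type == operatorv1.IngressControllerAvailableConditionType && c.Status == operatorv1.ConditionTrue {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isAvailable(operatorv1.IngressControllerStatus{}))
}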
- // +optional
- RouteSelector *metav1.LabelSelector `json:"routeSelector,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +kubebuilder:object:root=true
-
-// IngressControllerList contains a list of IngressControllers.
-//
-// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
-// +openshift:compatibility-gen:level=1
-type IngressControllerList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata,omitempty"`
- Items []IngressController `json:"items"`
-}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go
deleted file mode 100644
index b4e45c960..000000000
--- a/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package v1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// KubeAPIServer provides information to configure an operator to manage kube-apiserver.
-//
-// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
-// +openshift:compatibility-gen:level=1
-type KubeAPIServer struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata"`
-
- // spec is the specification of the desired behavior of the Kubernetes API Server
- // +kubebuilder:validation:Required
- // +required
- Spec KubeAPIServerSpec `json:"spec"`
-
- // status is the most recently observed status of the Kubernetes API Server
- // +optional
- Status KubeAPIServerStatus `json:"status"`
-}
-
-type KubeAPIServerSpec struct {
- StaticPodOperatorSpec `json:",inline"`
-}
-
-type KubeAPIServerStatus struct {
- StaticPodOperatorStatus `json:",inline"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// KubeAPIServerList is a collection of items
-//
-// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
-// +openshift:compatibility-gen:level=1
-type KubeAPIServerList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- // Items contains the items
- Items []KubeAPIServer `json:"items"`
-}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go
deleted file mode 100644
index e07d26f17..000000000
--- a/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package v1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// KubeControllerManager provides information to configure an operator to manage kube-controller-manager.
-//
-// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
-// +openshift:compatibility-gen:level=1 -type KubeControllerManager struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - - // spec is the specification of the desired behavior of the Kubernetes Controller Manager - // +kubebuilder:validation:Required - // +required - Spec KubeControllerManagerSpec `json:"spec"` - - // status is the most recently observed status of the Kubernetes Controller Manager - // +optional - Status KubeControllerManagerStatus `json:"status"` -} - -type KubeControllerManagerSpec struct { - StaticPodOperatorSpec `json:",inline"` - - // useMoreSecureServiceCA indicates that the service-ca.crt provided in SA token volumes should include only - // enough certificates to validate service serving certificates. - // Once set to true, it cannot be set to false. - // Even if someone finds a way to set it back to false, the service-ca.crt files that previously existed will - // only have the more secure content. - // +kubebuilder:default=false - UseMoreSecureServiceCA bool `json:"useMoreSecureServiceCA"` -} - -type KubeControllerManagerStatus struct { - StaticPodOperatorStatus `json:",inline"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// KubeControllerManagerList is a collection of items -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type KubeControllerManagerList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - - // Items contains the items - Items []KubeControllerManager `json:"items"` -} diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go b/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go deleted file mode 100644 index b187efc83..000000000 --- a/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go +++ /dev/null @@ -1,46 +0,0 @@ -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// KubeStorageVersionMigrator provides information to configure an operator to manage kube-storage-version-migrator. -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type KubeStorageVersionMigrator struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - - // +kubebuilder:validation:Required - // +required - Spec KubeStorageVersionMigratorSpec `json:"spec"` - // +optional - Status KubeStorageVersionMigratorStatus `json:"status"` -} - -type KubeStorageVersionMigratorSpec struct { - OperatorSpec `json:",inline"` -} - -type KubeStorageVersionMigratorStatus struct { - OperatorStatus `json:",inline"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// KubeStorageVersionMigratorList is a collection of items -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
-// +openshift:compatibility-gen:level=1
-type KubeStorageVersionMigratorList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- // Items contains the items
- Items []KubeStorageVersionMigrator `json:"items"`
-}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_network.go b/vendor/github.com/openshift/api/operator/v1/types_network.go
deleted file mode 100644
index e3bd64b0e..000000000
--- a/vendor/github.com/openshift/api/operator/v1/types_network.go
+++ /dev/null
@@ -1,594 +0,0 @@
-package v1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Network describes the cluster's desired network configuration. It is
-// consumed by the cluster-network-operator.
-//
-// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
-// +k8s:openapi-gen=true
-// +openshift:compatibility-gen:level=1
-type Network struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- Spec NetworkSpec `json:"spec,omitempty"`
- Status NetworkStatus `json:"status,omitempty"`
-}
-
-// NetworkStatus is detailed operator status, which is distilled
-// up to the Network clusteroperator object.
-type NetworkStatus struct {
- OperatorStatus `json:",inline"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// NetworkList contains a list of Network configurations
-//
-// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
-// +openshift:compatibility-gen:level=1
-type NetworkList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata,omitempty"`
- Items []Network `json:"items"`
-}
-
-// NetworkSpec is the top-level network configuration object.
-type NetworkSpec struct {
- OperatorSpec `json:",inline"`
-
- // clusterNetwork is the IP address pool to use for pod IPs.
- // Some network providers, e.g. OpenShift SDN, support multiple ClusterNetworks.
- // Others only support one. This is equivalent to the cluster-cidr.
- ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork"`
-
- // serviceNetwork is the IP address pool to use for Service IPs.
- // Currently, all existing network providers only support a single value
- // here, but this is an array to allow for growth.
- ServiceNetwork []string `json:"serviceNetwork"`
-
- // defaultNetwork is the "default" network that all pods will receive
- DefaultNetwork DefaultNetworkDefinition `json:"defaultNetwork"`
-
- // additionalNetworks is a list of extra networks to make available to pods
- // when multiple networks are enabled.
- AdditionalNetworks []AdditionalNetworkDefinition `json:"additionalNetworks,omitempty"`
-
- // disableMultiNetwork specifies whether or not multiple pod network
- // support should be disabled. If unset, this property defaults to
- // 'false' and multiple network support is enabled.
- DisableMultiNetwork *bool `json:"disableMultiNetwork,omitempty"`
-
- // useMultiNetworkPolicy enables a controller which allows for
- // MultiNetworkPolicy objects to be used on additional networks as
- // created by Multus CNI. MultiNetworkPolicy objects are similar to NetworkPolicy
- // objects, but NetworkPolicy objects only apply to the primary interface.
- // With MultiNetworkPolicy, you can control the traffic that a pod can receive
- // over the secondary interfaces.
If unset, this property defaults to 'false'
- // and MultiNetworkPolicy objects are ignored. If 'disableMultiNetwork' is
- // 'true' then the value of this field is ignored.
- UseMultiNetworkPolicy *bool `json:"useMultiNetworkPolicy,omitempty"`
-
- // deployKubeProxy specifies whether or not a standalone kube-proxy should
- // be deployed by the operator. Some network providers include kube-proxy
- // or similar functionality. If unset, the plugin will attempt to select
- // the correct value, which is false when OpenShift SDN and ovn-kubernetes are
- // used and true otherwise.
- // +optional
- DeployKubeProxy *bool `json:"deployKubeProxy,omitempty"`
-
- // disableNetworkDiagnostics specifies whether PodNetworkConnectivityCheck
- // CRs (connectivity checks from a test pod to every node, the apiserver, and the LB)
- // should be disabled.
- // If unset, this property defaults to 'false' and network diagnostics is enabled.
- // Setting this to 'true' would reduce the additional load of the pods performing the checks.
- // +optional
- // +kubebuilder:default:=false
- DisableNetworkDiagnostics bool `json:"disableNetworkDiagnostics"`
-
- // kubeProxyConfig lets us configure desired proxy configuration.
- // If not specified, sensible defaults will be chosen by OpenShift directly.
- // Not consumed by all network providers - currently only openshift-sdn.
- KubeProxyConfig *ProxyConfig `json:"kubeProxyConfig,omitempty"`
-
- // exportNetworkFlows enables and configures the export of network flow metadata from the pod network
- // by using protocols NetFlow, SFlow or IPFIX. Currently only supported on OVN-Kubernetes plugin.
- // If unset, flows will not be exported to any collector.
- // +optional
- ExportNetworkFlows *ExportNetworkFlows `json:"exportNetworkFlows,omitempty"`
-
- // migration enables and configures the cluster network migration. The
- // migration procedure allows changing the network type and the MTU.
- // +optional
- Migration *NetworkMigration `json:"migration,omitempty"`
-}
-
-// NetworkMigration represents the cluster network migration configuration.
-type NetworkMigration struct {
- // networkType is the target type of network migration. Set this to the
- // target network type to allow changing the default network. If unset, the
- // operation of changing cluster default network plugin will be rejected.
- // The supported values are OpenShiftSDN, OVNKubernetes
- // +optional
- NetworkType string `json:"networkType,omitempty"`
-
- // mtu contains the MTU migration configuration. Set this to allow changing
- // the MTU values for the default network. If unset, the operation of
- // changing the MTU for the default network will be rejected.
- // +optional
- MTU *MTUMigration `json:"mtu,omitempty"`
-}
-
-// MTUMigration contains information about MTU migration.
-type MTUMigration struct {
- // network contains information about MTU migration for the default network.
- // Migrations are only allowed to MTU values lower than the machine's uplink
- // MTU by the minimum appropriate offset.
- // +optional
- Network *MTUMigrationValues `json:"network,omitempty"`
-
- // machine contains MTU migration configuration for the machine's uplink.
- // Needs to be migrated along with the default network MTU unless the
- // current uplink MTU already accommodates the default network MTU.
- // +optional
- Machine *MTUMigrationValues `json:"machine,omitempty"`
-}
-
-// MTUMigrationValues contains the values for an MTU migration.
-type MTUMigrationValues struct {
- // to is the MTU to migrate to.
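A hedged sketch of the migration types above (illustrative only, same assumed import; the To/From pointer fields are declared just below). MTU values are placeholders chosen to respect the documented constraint that the network MTU stays below the machine uplink MTU:

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func u32(v uint32) *uint32 { return &v }

func main() {
	// Request a change of default network type together with an MTU migration.
	migration := operatorv1.NetworkMigration{
		NetworkType: string(operatorv1.NetworkTypeOVNKubernetes),
		MTU: &operatorv1.MTUMigration{
			Machine: &operatorv1.MTUMigrationValues{From: u32(1500), To: u32(9000)},
			Network: &operatorv1.MTUMigrationValues{From: u32(1400), To: u32(8900)},
		},
	}
	fmt.Printf("%+v\n", migration)
}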
- // +kubebuilder:validation:Minimum=0
- To *uint32 `json:"to"`
-
- // from is the MTU to migrate from.
- // +kubebuilder:validation:Minimum=0
- // +optional
- From *uint32 `json:"from,omitempty"`
-}
-
-// ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size
-// HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. If
-// the HostPrefix field is not used by the plugin, it can be left unset.
-// Not all network providers support multiple ClusterNetworks
-type ClusterNetworkEntry struct {
- CIDR string `json:"cidr"`
- // +kubebuilder:validation:Minimum=0
- // +optional
- HostPrefix uint32 `json:"hostPrefix,omitempty"`
-}
-
-// DefaultNetworkDefinition represents a single network plugin's configuration.
-// type must be specified, along with exactly one "Config" that matches the type.
-type DefaultNetworkDefinition struct {
- // type is the type of network
- // All NetworkTypes are supported except for NetworkTypeRaw
- Type NetworkType `json:"type"`
-
- // openShiftSDNConfig configures the openshift-sdn plugin
- // +optional
- OpenShiftSDNConfig *OpenShiftSDNConfig `json:"openshiftSDNConfig,omitempty"`
-
- // ovnKubernetesConfig configures the ovn-kubernetes plugin.
- // +optional
- OVNKubernetesConfig *OVNKubernetesConfig `json:"ovnKubernetesConfig,omitempty"`
-
- // KuryrConfig configures the kuryr plugin
- // +optional
- KuryrConfig *KuryrConfig `json:"kuryrConfig,omitempty"`
-}
-
-// SimpleMacvlanConfig contains configurations for the macvlan interface.
-type SimpleMacvlanConfig struct {
- // master is the host interface to create the macvlan interface from.
- // If not specified, the default route interface will be used.
- // +optional
- Master string `json:"master,omitempty"`
-
- // IPAMConfig configures the IPAM module to be used for IP Address Management (IPAM).
- // +optional
- IPAMConfig *IPAMConfig `json:"ipamConfig,omitempty"`
-
- // mode is the macvlan mode: bridge, private, vepa, passthru. The default is bridge
- // +optional
- Mode MacvlanMode `json:"mode,omitempty"`
-
- // mtu is the MTU to use for the macvlan interface. If unset, the host's
- // kernel will select the value.
- // +kubebuilder:validation:Minimum=0
- // +optional
- MTU uint32 `json:"mtu,omitempty"`
-}
-
-// StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses
-type StaticIPAMAddresses struct {
- // Address is the IP address in CIDR format
- // +optional
- Address string `json:"address"`
- // Gateway is an IP within the subnet to designate as the gateway
- // +optional
- Gateway string `json:"gateway,omitempty"`
-}
-
-// StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes
-type StaticIPAMRoutes struct {
- // Destination is the IP route destination
- Destination string `json:"destination"`
- // Gateway is the route's next-hop IP address
- // If unset, a default gateway is assumed (as determined by the CNI plugin).
- // +optional
- Gateway string `json:"gateway,omitempty"`
-}
-
-// StaticIPAMDNS provides DNS related information for static IPAM
-type StaticIPAMDNS struct {
- // Nameservers lists the DNS servers to use for IP lookup
- // +optional
- Nameservers []string `json:"nameservers,omitempty"`
- // Domain configures the local domain used for short hostname lookups
- // +optional
- Domain string `json:"domain,omitempty"`
- // Search configures priority ordered search domains for short hostname lookups
- // +optional
- Search []string `json:"search,omitempty"`
-}
-
-// StaticIPAMConfig contains configurations for static IPAM (IP Address Management)
-type StaticIPAMConfig struct {
- // Addresses configures IP addresses for the interface
- // +optional
- Addresses []StaticIPAMAddresses `json:"addresses,omitempty"`
- // Routes configures IP routes for the interface
- // +optional
- Routes []StaticIPAMRoutes `json:"routes,omitempty"`
- // DNS configures DNS for the interface
- // +optional
- DNS *StaticIPAMDNS `json:"dns,omitempty"`
-}
-
-// IPAMConfig contains configurations for IPAM (IP Address Management)
-type IPAMConfig struct {
- // Type is the type of IPAM module to be used for IP Address Management (IPAM).
- // The supported values are IPAMTypeDHCP, IPAMTypeStatic
- Type IPAMType `json:"type"`
-
- // StaticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic
- // +optional
- StaticIPAMConfig *StaticIPAMConfig `json:"staticIPAMConfig,omitempty"`
-}
-
-// AdditionalNetworkDefinition configures an extra network that is available but not
-// created by default. Instead, pods must request it by name.
-// type must be specified, along with exactly one "Config" that matches the type.
-type AdditionalNetworkDefinition struct {
- // type is the type of network
- // The supported values are NetworkTypeRaw, NetworkTypeSimpleMacvlan
- Type NetworkType `json:"type"`
-
- // name is the name of the network. This will be populated in the resulting CRD
- // This must be unique.
- Name string `json:"name"`
-
- // namespace is the namespace of the network. This will be populated in the resulting CRD
- // If not given, the network will be created in the default namespace.
- Namespace string `json:"namespace,omitempty"`
-
- // rawCNIConfig is the raw CNI configuration json to create in the
- // NetworkAttachmentDefinition CRD
- RawCNIConfig string `json:"rawCNIConfig,omitempty"`
-
- // SimpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan
- // +optional
- SimpleMacvlanConfig *SimpleMacvlanConfig `json:"simpleMacvlanConfig,omitempty"`
-}
-
-// OpenShiftSDNConfig configures the three openshift-sdn plugins
-type OpenShiftSDNConfig struct {
- // mode is one of "Multitenant", "Subnet", or "NetworkPolicy"
- Mode SDNMode `json:"mode"`
-
- // vxlanPort is the port to use for all vxlan packets. The default is 4789.
- // +kubebuilder:validation:Minimum=0
- // +optional
- VXLANPort *uint32 `json:"vxlanPort,omitempty"`
-
- // mtu is the MTU to use for the tunnel interface. Defaults to 1450 if unset.
- // This must be 50 bytes smaller than the machine's uplink.
- // +kubebuilder:validation:Minimum=0
- // +optional
- MTU *uint32 `json:"mtu,omitempty"`
-
- // useExternalOpenvswitch used to control whether the operator would deploy an OVS
- // DaemonSet itself or expect someone else to start OVS. As of 4.6, OVS is always
- // run as a system service, and this flag is ignored.
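A minimal sketch, assuming the same vendored import, of wiring the static IPAM types above together (the IPAMTypeStatic constant is declared later in this file; addresses are placeholders):

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	// One static address with a gateway, a default route, and DNS settings.
	ipam := operatorv1.IPAMConfig{
		Type: operatorv1.IPAMTypeStatic,
		StaticIPAMConfig: &operatorv1.StaticIPAMConfig{
			Addresses: []operatorv1.StaticIPAMAddresses{
				{Address: "192.168.10.10/24", Gateway: "192.168.10.1"},
			},
			Routes: []operatorv1.StaticIPAMRoutes{
				{Destination: "0.0.0.0/0"}, // gateway omitted: CNI default applies
			},
			DNS: &operatorv1.StaticIPAMDNS{
				Nameservers: []string{"192.168.10.2"},
				Search:      []string{"example.internal"},
			},
		},
	}
	fmt.Printf("%+v\n", ipam)
}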
- // DEPRECATED: non-functional as of 4.6
- // +optional
- UseExternalOpenvswitch *bool `json:"useExternalOpenvswitch,omitempty"`
-
- // enableUnidling controls whether or not the service proxy will support idling
- // and unidling of services. By default, unidling is enabled.
- EnableUnidling *bool `json:"enableUnidling,omitempty"`
-}
-
-// KuryrConfig configures the Kuryr-Kubernetes SDN
-type KuryrConfig struct {
- // The port on which kuryr-daemon will listen for readiness and liveness requests.
- // +kubebuilder:validation:Minimum=0
- // +optional
- DaemonProbesPort *uint32 `json:"daemonProbesPort,omitempty"`
-
- // The port on which kuryr-controller will listen for readiness and liveness requests.
- // +kubebuilder:validation:Minimum=0
- // +optional
- ControllerProbesPort *uint32 `json:"controllerProbesPort,omitempty"`
-
- // openStackServiceNetwork contains the CIDR of the network from which to allocate IPs for
- // OpenStack Octavia's Amphora VMs. Please note that with the Amphora driver Octavia uses
- // two IPs from that network for each loadbalancer - one given by OpenShift and the second
- // for VRRP connections. As the first one is managed by OpenShift's and the second by Neutron's
- // IPAMs, those need to come from different pools. Therefore `openStackServiceNetwork`
- // needs to be at least twice the size of `serviceNetwork`, and the whole `serviceNetwork`
- // must overlap with `openStackServiceNetwork`. cluster-network-operator will then
- // make sure VRRP IPs are taken from the ranges inside `openStackServiceNetwork` that
- // are not overlapping with `serviceNetwork`, effectively preventing conflicts. If not set,
- // cluster-network-operator will use `serviceNetwork` expanded by decrementing the prefix
- // size by 1.
- // +optional
- OpenStackServiceNetwork string `json:"openStackServiceNetwork,omitempty"`
-
- // enablePortPoolsPrepopulation when true will make Kuryr prepopulate each newly created port
- // pool with a minimum number of ports. Kuryr uses Neutron port pooling because creating a
- // single port takes a significant amount of time. It creates a number of ports when
- // the first pod that is configured to use the dedicated network for pods is created in a namespace,
- // and keeps them ready to be attached to pods. Port prepopulation is disabled by default.
- // +optional
- EnablePortPoolsPrepopulation bool `json:"enablePortPoolsPrepopulation,omitempty"`
-
- // poolMaxPorts sets a maximum number of free ports that are being kept in a port pool.
- // If the number of ports exceeds this setting, free ports will get deleted. Setting 0
- // will disable this upper bound, effectively preventing pools from shrinking and this
- // is the default value. For more information about port pools see
- // enablePortPoolsPrepopulation setting.
- // +kubebuilder:validation:Minimum=0
- // +optional
- PoolMaxPorts uint `json:"poolMaxPorts,omitempty"`
-
- // poolMinPorts sets a minimum number of free ports that should be kept in a port pool.
- // If the number of ports is lower than this setting, new ports will get created and
- // added to the pool. The default is 1. For more information about port pools see
- // enablePortPoolsPrepopulation setting.
- // +kubebuilder:validation:Minimum=1
- // +optional
- PoolMinPorts uint `json:"poolMinPorts,omitempty"`
-
- // poolBatchPorts sets a number of ports that should be created in a single batch request
- // to extend the port pool. The default is 3. For more information about port pools see
- // enablePortPoolsPrepopulation setting.
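As an illustrative aside (not part of this diff, same assumed import), a sketch of the Kuryr port-pool knobs documented above; the CIDR is a placeholder and must match the cluster's actual serviceNetwork sizing:

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	// Prepopulate pools and bound their size per the field docs above.
	kuryr := operatorv1.KuryrConfig{
		EnablePortPoolsPrepopulation: true,
		PoolMinPorts:                 5,  // keep at least 5 free ports pooled
		PoolMaxPorts:                 50, // shrink pools above 50 free ports
		OpenStackServiceNetwork:      "172.30.0.0/15",
	}
	fmt.Printf("%+v\n", kuryr)
}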
- // +kubebuilder:validation:Minimum=0
- // +optional
- PoolBatchPorts *uint `json:"poolBatchPorts,omitempty"`
-
- // mtu is the MTU that Kuryr should use when creating pod networks in Neutron.
- // The value has to be lower than or equal to the MTU of the nodes network and Neutron has
- // to allow creation of tenant networks with such MTU. If unset, pod networks will be
- // created with the same MTU as the nodes network has.
- // +kubebuilder:validation:Minimum=0
- // +optional
- MTU *uint32 `json:"mtu,omitempty"`
-}
-
-// OVNKubernetesConfig contains the configuration parameters for networks
-// using the ovn-kubernetes network project
-type OVNKubernetesConfig struct {
- // mtu is the MTU to use for the tunnel interface. This must be 100
- // bytes smaller than the uplink mtu.
- // Default is 1400
- // +kubebuilder:validation:Minimum=0
- // +optional
- MTU *uint32 `json:"mtu,omitempty"`
- // genevePort is the UDP port to be used by Geneve encapsulation.
- // Default is 6081
- // +kubebuilder:validation:Minimum=1
- // +optional
- GenevePort *uint32 `json:"genevePort,omitempty"`
- // HybridOverlayConfig configures an additional overlay network for peers that are
- // not using OVN.
- // +optional
- HybridOverlayConfig *HybridOverlayConfig `json:"hybridOverlayConfig,omitempty"`
- // ipsecConfig enables and configures IPsec for pods on the pod network within the
- // cluster.
- // +optional
- IPsecConfig *IPsecConfig `json:"ipsecConfig,omitempty"`
- // policyAuditConfig is the configuration for network policy audit events. If unset,
- // reported defaults are used.
- // +optional
- PolicyAuditConfig *PolicyAuditConfig `json:"policyAuditConfig,omitempty"`
- // gatewayConfig holds the configuration for node gateway options.
- // +optional
- GatewayConfig *GatewayConfig `json:"gatewayConfig,omitempty"`
-}
-
-type HybridOverlayConfig struct {
- // HybridClusterNetwork defines a network space given to nodes on an additional overlay network.
- HybridClusterNetwork []ClusterNetworkEntry `json:"hybridClusterNetwork"`
- // HybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network.
- // Default is 4789
- // +optional
- HybridOverlayVXLANPort *uint32 `json:"hybridOverlayVXLANPort,omitempty"`
-}
-
-type IPsecConfig struct {
-}
-
-// GatewayConfig holds node gateway-related parsed config file parameters and command-line overrides
-type GatewayConfig struct {
- // RoutingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port
- // into the host before sending it out. If this is not set, traffic will always egress directly
- // from OVN to outside without touching the host stack. Setting this to true means hardware
- // offload will not be supported. Default is false if GatewayConfig is specified.
- // +kubebuilder:default:=false
- // +optional
- RoutingViaHost bool `json:"routingViaHost,omitempty"`
-}
-
-type ExportNetworkFlows struct {
- // netFlow defines the NetFlow configuration.
- // +optional
- NetFlow *NetFlowConfig `json:"netFlow,omitempty"`
- // sFlow defines the SFlow configuration.
- // +optional
- SFlow *SFlowConfig `json:"sFlow,omitempty"`
- // ipfix defines IPFIX configuration.
- // +optional
- IPFIX *IPFIXConfig `json:"ipfix,omitempty"`
-}
-
-type NetFlowConfig struct {
- // netFlow defines the NetFlow collectors that will consume the flow data exported from OVS.
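A hedged sketch of configuring flow export (illustrative only, same assumed import; the Collectors field and the IPPort string type are declared just below):

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	// Export NetFlow records from OVS to a single ip:port collector.
	flows := operatorv1.ExportNetworkFlows{
		NetFlow: &operatorv1.NetFlowConfig{
			Collectors: []operatorv1.IPPort{"192.0.2.10:2056"},
		},
	}
	fmt.Printf("%+v\n", flows)
}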
- // It is a list of strings formatted as ip:port with a maximum of ten items
- // +kubebuilder:validation:MinItems=1
- // +kubebuilder:validation:MaxItems=10
- Collectors []IPPort `json:"collectors,omitempty"`
-}
-
-type SFlowConfig struct {
- // sFlowCollectors is a list of strings formatted as ip:port with a maximum of ten items
- // +kubebuilder:validation:MinItems=1
- // +kubebuilder:validation:MaxItems=10
- Collectors []IPPort `json:"collectors,omitempty"`
-}
-
-type IPFIXConfig struct {
- // ipfixCollectors is a list of strings formatted as ip:port with a maximum of ten items
- // +kubebuilder:validation:MinItems=1
- // +kubebuilder:validation:MaxItems=10
- Collectors []IPPort `json:"collectors,omitempty"`
-}
-
-// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$`
-type IPPort string
-
-type PolicyAuditConfig struct {
- // rateLimit is the approximate maximum number of messages to generate per-second per-node. If
- // unset, the default of 20 msg/sec is used.
- // +kubebuilder:default=20
- // +kubebuilder:validation:Minimum=1
- // +optional
- RateLimit *uint32 `json:"rateLimit,omitempty"`
-
- // maxFileSize is the maximum size an ACL audit log file is allowed to reach before rotation occurs.
- // Units are in MB and the default is 50MB.
- // +kubebuilder:default=50
- // +kubebuilder:validation:Minimum=1
- // +optional
- MaxFileSize *uint32 `json:"maxFileSize,omitempty"`
-
- // destination is the location for policy log messages.
- // Regardless of this config, persistent logs will always be dumped to the host
- // at /var/log/ovn/.
- // Additionally, syslog output may be configured as follows.
- // Valid values are:
- // - "libc" -> to use the libc syslog() function of the host node's journald process
- // - "udp:host:port" -> for sending syslog over UDP
- // - "unix:file" -> for using the UNIX domain socket directly
- // - "null" -> to discard all messages logged to syslog
- // The default is "null"
- // +kubebuilder:default=null
- // +kubebuilder:pattern='^libc$|^null$|^udp:(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]):([0-9]){0,5}$|^unix:(\/[^\/ ]*)+([^\/\s])$'
- // +optional
- Destination string `json:"destination,omitempty"`
-
- // syslogFacility specifies the RFC5424 facility for generated messages, e.g. "kern". Default is "local0"
- // +kubebuilder:default=local0
- // +kubebuilder:Enum=kern;user;mail;daemon;auth;syslog;lpr;news;uucp;clock;ftp;ntp;audit;alert;clock2;local0;local1;local2;local3;local4;local5;local6;local7
- // +optional
- SyslogFacility string `json:"syslogFacility,omitempty"`
-}
-
-// NetworkType describes the network plugin type to configure
-type NetworkType string
-
-// ProxyArgumentList is a list of arguments to pass to the kubeproxy process
-type ProxyArgumentList []string
-
-// ProxyConfig defines the configuration knobs for kubeproxy
-// All of these are optional and have sensible defaults
-type ProxyConfig struct {
- // An internal kube-proxy parameter. In older releases of OCP, this sometimes needed to be adjusted
- // in large clusters for performance reasons, but this is no longer necessary, and there is no reason
- // to change this from the default value.
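A minimal sketch of the policy-audit knobs above (illustrative only, same assumed import; the remote receiver address is a placeholder):

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func u32(v uint32) *uint32 { return &v }

func main() {
	// Route network-policy audit messages to a remote syslog receiver,
	// overriding the documented rate-limit and file-size defaults.
	audit := operatorv1.PolicyAuditConfig{
		RateLimit:      u32(50),
		MaxFileSize:    u32(100),
		Destination:    "udp:192.0.2.20:514",
		SyslogFacility: "local0",
	}
	fmt.Printf("%+v\n", audit)
}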
- // Default: 30s
- IptablesSyncPeriod string `json:"iptablesSyncPeriod,omitempty"`
-
- // The address to "bind" on
- // Defaults to 0.0.0.0
- BindAddress string `json:"bindAddress,omitempty"`
-
- // Any additional arguments to pass to the kubeproxy process
- ProxyArguments map[string]ProxyArgumentList `json:"proxyArguments,omitempty"`
-}
-
-const (
- // NetworkTypeOpenShiftSDN means the openshift-sdn plugin will be configured
- NetworkTypeOpenShiftSDN NetworkType = "OpenShiftSDN"
-
- // NetworkTypeOVNKubernetes means the ovn-kubernetes project will be configured.
- // This is currently not implemented.
- NetworkTypeOVNKubernetes NetworkType = "OVNKubernetes"
-
- // NetworkTypeKuryr means the kuryr-kubernetes project will be configured.
- NetworkTypeKuryr NetworkType = "Kuryr"
-
- // NetworkTypeRaw
- NetworkTypeRaw NetworkType = "Raw"
-
- // NetworkTypeSimpleMacvlan
- NetworkTypeSimpleMacvlan NetworkType = "SimpleMacvlan"
-)
-
-// SDNMode is the Mode the openshift-sdn plugin is in
-type SDNMode string
-
-const (
- // SDNModeSubnet is a simple mode that offers no isolation between pods
- SDNModeSubnet SDNMode = "Subnet"
-
- // SDNModeMultitenant is a special "multitenant" mode that offers limited
- // isolation configuration between namespaces
- SDNModeMultitenant SDNMode = "Multitenant"
-
- // SDNModeNetworkPolicy is a full NetworkPolicy implementation that allows
- // for sophisticated network isolation and segmenting. This is the default.
- SDNModeNetworkPolicy SDNMode = "NetworkPolicy"
-)
-
-// MacvlanMode is the mode of macvlan. The values are lowercase to match the CNI plugin
-// config values. See "man ip-link" for details.
-type MacvlanMode string
-
-const (
- // MacvlanModeBridge is the macvlan with thin bridge function.
- MacvlanModeBridge MacvlanMode = "Bridge"
- // MacvlanModePrivate
- MacvlanModePrivate MacvlanMode = "Private"
- // MacvlanModeVEPA is used with Virtual Ethernet Port Aggregator
- // (802.1qbg) switch
- MacvlanModeVEPA MacvlanMode = "VEPA"
- // MacvlanModePassthru
- MacvlanModePassthru MacvlanMode = "Passthru"
-)
-
-// IPAMType describes the IP address management type to configure
-type IPAMType string
-
-const (
- // IPAMTypeDHCP uses DHCP for IP management
- IPAMTypeDHCP IPAMType = "DHCP"
- // IPAMTypeStatic uses static IP
- IPAMTypeStatic IPAMType = "Static"
-)
diff --git a/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go
deleted file mode 100644
index 5511db364..000000000
--- a/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package v1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// OpenShiftAPIServer provides information to configure an operator to manage openshift-apiserver.
-//
-// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
-// +openshift:compatibility-gen:level=1
-type OpenShiftAPIServer struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata"`
-
- // spec is the specification of the desired behavior of the OpenShift API Server.
- // +kubebuilder:validation:Required
- // +required
- Spec OpenShiftAPIServerSpec `json:"spec"`
-
- // status defines the observed status of the OpenShift API Server.
- // +optional - Status OpenShiftAPIServerStatus `json:"status"` -} - -type OpenShiftAPIServerSpec struct { - OperatorSpec `json:",inline"` -} - -type OpenShiftAPIServerStatus struct { - OperatorStatus `json:",inline"` - - // latestAvailableRevision is the latest revision used as suffix of revisioned - // secrets like encryption-config. A new revision causes a new deployment of - // pods. - // +optional - // +kubebuilder:validation:Minimum=0 - LatestAvailableRevision int32 `json:"latestAvailableRevision,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// OpenShiftAPIServerList is a collection of items -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type OpenShiftAPIServerList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - - // Items contains the items - Items []OpenShiftAPIServer `json:"items"` -} diff --git a/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go deleted file mode 100644 index 442e40314..000000000 --- a/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go +++ /dev/null @@ -1,46 +0,0 @@ -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// OpenShiftControllerManager provides information to configure an operator to manage openshift-controller-manager. -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type OpenShiftControllerManager struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - - // +kubebuilder:validation:Required - // +required - Spec OpenShiftControllerManagerSpec `json:"spec"` - // +optional - Status OpenShiftControllerManagerStatus `json:"status"` -} - -type OpenShiftControllerManagerSpec struct { - OperatorSpec `json:",inline"` -} - -type OpenShiftControllerManagerStatus struct { - OperatorStatus `json:",inline"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// OpenShiftControllerManagerList is a collection of items -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type OpenShiftControllerManagerList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - - // Items contains the items - Items []OpenShiftControllerManager `json:"items"` -} diff --git a/vendor/github.com/openshift/api/operator/v1/types_scheduler.go b/vendor/github.com/openshift/api/operator/v1/types_scheduler.go deleted file mode 100644 index 654f0d612..000000000 --- a/vendor/github.com/openshift/api/operator/v1/types_scheduler.go +++ /dev/null @@ -1,49 +0,0 @@ -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// KubeScheduler provides information to configure an operator to manage scheduler. -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
-// +openshift:compatibility-gen:level=1 -type KubeScheduler struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - - // spec is the specification of the desired behavior of the Kubernetes Scheduler - // +kubebuilder:validation:Required - // +required - Spec KubeSchedulerSpec `json:"spec"` - - // status is the most recently observed status of the Kubernetes Scheduler - // +optional - Status KubeSchedulerStatus `json:"status"` -} - -type KubeSchedulerSpec struct { - StaticPodOperatorSpec `json:",inline"` -} - -type KubeSchedulerStatus struct { - StaticPodOperatorStatus `json:",inline"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// KubeSchedulerList is a collection of items -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type KubeSchedulerList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - - // Items contains the items - Items []KubeScheduler `json:"items"` -} diff --git a/vendor/github.com/openshift/api/operator/v1/types_serviceca.go b/vendor/github.com/openshift/api/operator/v1/types_serviceca.go deleted file mode 100644 index a7404c4f2..000000000 --- a/vendor/github.com/openshift/api/operator/v1/types_serviceca.go +++ /dev/null @@ -1,48 +0,0 @@ -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceCA provides information to configure an operator to manage the service cert controllers -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type ServiceCA struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - - //spec holds user settable values for configuration - // +kubebuilder:validation:Required - // +required - Spec ServiceCASpec `json:"spec"` - // status holds observed values from the cluster. They may not be overridden. - // +optional - Status ServiceCAStatus `json:"status"` -} - -type ServiceCASpec struct { - OperatorSpec `json:",inline"` -} - -type ServiceCAStatus struct { - OperatorStatus `json:",inline"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceCAList is a collection of items -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
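
Types like KubeScheduler and ServiceCA are only usable through generic clients and informers once they are registered in a runtime.Scheme. A sketch of that registration, assuming the conventional Install helper that openshift/api group packages expose; if the vendored copy differs, substitute its registration function.

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// Install is assumed to register all operator.openshift.io/v1 kinds.
	if err := operatorv1.Install(scheme); err != nil {
		panic(err)
	}
	fmt.Println(scheme.Recognizes(operatorv1.GroupVersion.WithKind("KubeScheduler")))
}
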
-// +openshift:compatibility-gen:level=1 -type ServiceCAList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - - // Items contains the items - Items []ServiceCA `json:"items"` -} diff --git a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go deleted file mode 100644 index 2d96e0240..000000000 --- a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go +++ /dev/null @@ -1,48 +0,0 @@ -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceCatalogAPIServer provides information to configure an operator to manage Service Catalog API Server -// DEPRECATED: will be removed in 4.6 -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type ServiceCatalogAPIServer struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // +kubebuilder:validation:Required - // +required - Spec ServiceCatalogAPIServerSpec `json:"spec"` - // +optional - Status ServiceCatalogAPIServerStatus `json:"status"` -} - -type ServiceCatalogAPIServerSpec struct { - OperatorSpec `json:",inline"` -} - -type ServiceCatalogAPIServerStatus struct { - OperatorStatus `json:",inline"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceCatalogAPIServerList is a collection of items -// DEPRECATED: will be removed in 4.6 -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type ServiceCatalogAPIServerList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - - // Items contains the items - Items []ServiceCatalogAPIServer `json:"items"` -} diff --git a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go deleted file mode 100644 index 1317487e6..000000000 --- a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go +++ /dev/null @@ -1,48 +0,0 @@ -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceCatalogControllerManager provides information to configure an operator to manage Service Catalog Controller Manager -// DEPRECATED: will be removed in 4.6 -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
-// +openshift:compatibility-gen:level=1 -type ServiceCatalogControllerManager struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - - // +kubebuilder:validation:Required - // +required - Spec ServiceCatalogControllerManagerSpec `json:"spec"` - // +optional - Status ServiceCatalogControllerManagerStatus `json:"status"` -} - -type ServiceCatalogControllerManagerSpec struct { - OperatorSpec `json:",inline"` -} - -type ServiceCatalogControllerManagerStatus struct { - OperatorStatus `json:",inline"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceCatalogControllerManagerList is a collection of items -// DEPRECATED: will be removed in 4.6 -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type ServiceCatalogControllerManagerList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - - // Items contains the items - Items []ServiceCatalogControllerManager `json:"items"` -} diff --git a/vendor/github.com/openshift/api/operator/v1/types_storage.go b/vendor/github.com/openshift/api/operator/v1/types_storage.go deleted file mode 100644 index 38ffe26d5..000000000 --- a/vendor/github.com/openshift/api/operator/v1/types_storage.go +++ /dev/null @@ -1,50 +0,0 @@ -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Storage provides a means to configure an operator to manage the cluster storage operator. `cluster` is the canonical name. -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type Storage struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // spec holds user settable values for configuration - // +kubebuilder:validation:Required - // +required - Spec StorageSpec `json:"spec"` - - // status holds observed values from the cluster. They may not be overridden. - // +optional - Status StorageStatus `json:"status"` -} - -// StorageSpec is the specification of the desired behavior of the cluster storage operator. -type StorageSpec struct { - OperatorSpec `json:",inline"` -} - -// StorageStatus defines the observed status of the cluster storage operator. -type StorageStatus struct { - OperatorStatus `json:",inline"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:object:root=true - -// StorageList contains a list of Storages. -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type StorageList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []Storage `json:"items"` -} diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go deleted file mode 100644 index d3cb1ae63..000000000 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,3813 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -// Code generated by deepcopy-gen. DO NOT EDIT. 
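
For readers unfamiliar with deepcopy-gen output, the zz_generated file deleted below is mechanical: for each type it emits a DeepCopyInto that copies value fields with one struct assignment and then rebuilds every reference field, plus a DeepCopy wrapper that allocates the target. The !ignore_autogenerated build tag lets the generator compile the package without its own previous output. A hand-written equivalent for a toy type, purely illustrative; Widget is not a vendored name.

package main

import "fmt"

// Widget stands in for the API structs deleted below.
type Widget struct {
	Name string
	Tags []string
}

// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *Widget) DeepCopyInto(out *Widget) {
	*out = *in // copies value fields, but would alias the slice
	if in.Tags != nil {
		out.Tags = make([]string, len(in.Tags))
		copy(out.Tags, in.Tags)
	}
}

// DeepCopy allocates a new Widget and copies the receiver into it.
func (in *Widget) DeepCopy() *Widget {
	if in == nil {
		return nil
	}
	out := new(Widget)
	in.DeepCopyInto(out)
	return out
}

func main() {
	a := Widget{Name: "a", Tags: []string{"x"}}
	b := a.DeepCopy()
	b.Tags[0] = "y"
	fmt.Println(a.Tags[0], b.Tags[0]) // prints "x y": no aliasing
}
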
- -package v1 - -import ( - configv1 "github.com/openshift/api/config/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSClassicLoadBalancerParameters) DeepCopyInto(out *AWSClassicLoadBalancerParameters) { - *out = *in - out.ConnectionIdleTimeout = in.ConnectionIdleTimeout - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClassicLoadBalancerParameters. -func (in *AWSClassicLoadBalancerParameters) DeepCopy() *AWSClassicLoadBalancerParameters { - if in == nil { - return nil - } - out := new(AWSClassicLoadBalancerParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSLoadBalancerParameters) DeepCopyInto(out *AWSLoadBalancerParameters) { - *out = *in - if in.ClassicLoadBalancerParameters != nil { - in, out := &in.ClassicLoadBalancerParameters, &out.ClassicLoadBalancerParameters - *out = new(AWSClassicLoadBalancerParameters) - **out = **in - } - if in.NetworkLoadBalancerParameters != nil { - in, out := &in.NetworkLoadBalancerParameters, &out.NetworkLoadBalancerParameters - *out = new(AWSNetworkLoadBalancerParameters) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSLoadBalancerParameters. -func (in *AWSLoadBalancerParameters) DeepCopy() *AWSLoadBalancerParameters { - if in == nil { - return nil - } - out := new(AWSLoadBalancerParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSNetworkLoadBalancerParameters) DeepCopyInto(out *AWSNetworkLoadBalancerParameters) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSNetworkLoadBalancerParameters. -func (in *AWSNetworkLoadBalancerParameters) DeepCopy() *AWSNetworkLoadBalancerParameters { - if in == nil { - return nil - } - out := new(AWSNetworkLoadBalancerParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AccessLogging) DeepCopyInto(out *AccessLogging) { - *out = *in - in.Destination.DeepCopyInto(&out.Destination) - in.HTTPCaptureHeaders.DeepCopyInto(&out.HTTPCaptureHeaders) - if in.HTTPCaptureCookies != nil { - in, out := &in.HTTPCaptureCookies, &out.HTTPCaptureCookies - *out = make([]IngressControllerCaptureHTTPCookie, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLogging. -func (in *AccessLogging) DeepCopy() *AccessLogging { - if in == nil { - return nil - } - out := new(AccessLogging) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AddPage) DeepCopyInto(out *AddPage) { - *out = *in - if in.DisabledActions != nil { - in, out := &in.DisabledActions, &out.DisabledActions - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddPage. 
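
Note the two pointer-copy forms in the AWSLoadBalancerParameters copy above: the generator emits a plain value copy of the pointee (the **out = **in form) only when the pointed-to struct contains no pointers, slices, or maps, so a value copy is already deep; otherwise it recurses through DeepCopyInto. A compact sketch of the distinction; Flat, Nested, and Params are illustrative stand-ins, not vendored names.

package main

import "fmt"

// Flat has only value fields, so a generated copy can use **out = **in.
type Flat struct{ Timeout int }

// Nested holds a reference field, so the generator must recurse.
type Nested struct{ Args []string }

func (in *Nested) DeepCopyInto(out *Nested) {
	*out = *in
	out.Args = append([]string(nil), in.Args...)
}

type Params struct {
	Flat   *Flat
	Nested *Nested
}

func (in *Params) DeepCopyInto(out *Params) {
	*out = *in
	if in.Flat != nil {
		out.Flat = new(Flat)
		*out.Flat = *in.Flat // safe: Flat holds no references
	}
	if in.Nested != nil {
		out.Nested = new(Nested)
		in.Nested.DeepCopyInto(out.Nested) // must recurse for the slice
	}
}

func main() {
	p := Params{Flat: &Flat{Timeout: 30}, Nested: &Nested{Args: []string{"a"}}}
	var q Params
	p.DeepCopyInto(&q)
	q.Nested.Args[0] = "b"
	fmt.Println(p.Nested.Args[0]) // prints "a": the copy did not alias
}
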
-func (in *AddPage) DeepCopy() *AddPage { - if in == nil { - return nil - } - out := new(AddPage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AdditionalNetworkDefinition) DeepCopyInto(out *AdditionalNetworkDefinition) { - *out = *in - if in.SimpleMacvlanConfig != nil { - in, out := &in.SimpleMacvlanConfig, &out.SimpleMacvlanConfig - *out = new(SimpleMacvlanConfig) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalNetworkDefinition. -func (in *AdditionalNetworkDefinition) DeepCopy() *AdditionalNetworkDefinition { - if in == nil { - return nil - } - out := new(AdditionalNetworkDefinition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Authentication) DeepCopyInto(out *Authentication) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authentication. -func (in *Authentication) DeepCopy() *Authentication { - if in == nil { - return nil - } - out := new(Authentication) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Authentication) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AuthenticationList) DeepCopyInto(out *AuthenticationList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Authentication, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationList. -func (in *AuthenticationList) DeepCopy() *AuthenticationList { - if in == nil { - return nil - } - out := new(AuthenticationList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AuthenticationList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AuthenticationSpec) DeepCopyInto(out *AuthenticationSpec) { - *out = *in - in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationSpec. -func (in *AuthenticationSpec) DeepCopy() *AuthenticationSpec { - if in == nil { - return nil - } - out := new(AuthenticationSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AuthenticationStatus) DeepCopyInto(out *AuthenticationStatus) { - *out = *in - out.OAuthAPIServer = in.OAuthAPIServer - in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationStatus. -func (in *AuthenticationStatus) DeepCopy() *AuthenticationStatus { - if in == nil { - return nil - } - out := new(AuthenticationStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSISnapshotController) DeepCopyInto(out *CSISnapshotController) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotController. -func (in *CSISnapshotController) DeepCopy() *CSISnapshotController { - if in == nil { - return nil - } - out := new(CSISnapshotController) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CSISnapshotController) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSISnapshotControllerList) DeepCopyInto(out *CSISnapshotControllerList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CSISnapshotController, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotControllerList. -func (in *CSISnapshotControllerList) DeepCopy() *CSISnapshotControllerList { - if in == nil { - return nil - } - out := new(CSISnapshotControllerList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CSISnapshotControllerList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSISnapshotControllerSpec) DeepCopyInto(out *CSISnapshotControllerSpec) { - *out = *in - in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotControllerSpec. -func (in *CSISnapshotControllerSpec) DeepCopy() *CSISnapshotControllerSpec { - if in == nil { - return nil - } - out := new(CSISnapshotControllerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSISnapshotControllerStatus) DeepCopyInto(out *CSISnapshotControllerStatus) { - *out = *in - in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotControllerStatus. 
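
The DeepCopyObject wrappers above look redundant next to DeepCopy, but they are what satisfies apimachinery's runtime.Object interface, which is how informers and work queues copy objects without knowing concrete types. A sketch of that generic use; cloneAndLabel is a hypothetical helper, and PartialObjectMetadata merely stands in for any registered type such as Authentication or CSISnapshotController.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// cloneAndLabel copies any API object before mutating it; it accepts a
// runtime.Object precisely because DeepCopyObject is on that interface.
func cloneAndLabel(obj runtime.Object, key, value string) (runtime.Object, error) {
	out := obj.DeepCopyObject() // never mutate the input in place
	acc, err := meta.Accessor(out)
	if err != nil {
		return nil, err
	}
	labels := acc.GetLabels()
	if labels == nil {
		labels = map[string]string{}
	}
	labels[key] = value
	acc.SetLabels(labels)
	return out, nil
}

func main() {
	orig := &metav1.PartialObjectMetadata{ObjectMeta: metav1.ObjectMeta{Name: "demo"}}
	copied, _ := cloneAndLabel(orig, "team", "ingress")
	fmt.Println(orig.Labels, copied.(*metav1.PartialObjectMetadata).Labels)
}
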
-func (in *CSISnapshotControllerStatus) DeepCopy() *CSISnapshotControllerStatus { - if in == nil { - return nil - } - out := new(CSISnapshotControllerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClientTLS) DeepCopyInto(out *ClientTLS) { - *out = *in - out.ClientCA = in.ClientCA - if in.AllowedSubjectPatterns != nil { - in, out := &in.AllowedSubjectPatterns, &out.AllowedSubjectPatterns - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientTLS. -func (in *ClientTLS) DeepCopy() *ClientTLS { - if in == nil { - return nil - } - out := new(ClientTLS) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CloudCredential) DeepCopyInto(out *CloudCredential) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudCredential. -func (in *CloudCredential) DeepCopy() *CloudCredential { - if in == nil { - return nil - } - out := new(CloudCredential) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CloudCredential) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CloudCredentialList) DeepCopyInto(out *CloudCredentialList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CloudCredential, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudCredentialList. -func (in *CloudCredentialList) DeepCopy() *CloudCredentialList { - if in == nil { - return nil - } - out := new(CloudCredentialList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CloudCredentialList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CloudCredentialSpec) DeepCopyInto(out *CloudCredentialSpec) { - *out = *in - in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudCredentialSpec. -func (in *CloudCredentialSpec) DeepCopy() *CloudCredentialSpec { - if in == nil { - return nil - } - out := new(CloudCredentialSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CloudCredentialStatus) DeepCopyInto(out *CloudCredentialStatus) { - *out = *in - in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudCredentialStatus. -func (in *CloudCredentialStatus) DeepCopy() *CloudCredentialStatus { - if in == nil { - return nil - } - out := new(CloudCredentialStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterCSIDriver) DeepCopyInto(out *ClusterCSIDriver) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCSIDriver. -func (in *ClusterCSIDriver) DeepCopy() *ClusterCSIDriver { - if in == nil { - return nil - } - out := new(ClusterCSIDriver) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterCSIDriver) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterCSIDriverList) DeepCopyInto(out *ClusterCSIDriverList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterCSIDriver, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCSIDriverList. -func (in *ClusterCSIDriverList) DeepCopy() *ClusterCSIDriverList { - if in == nil { - return nil - } - out := new(ClusterCSIDriverList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterCSIDriverList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterCSIDriverSpec) DeepCopyInto(out *ClusterCSIDriverSpec) { - *out = *in - in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCSIDriverSpec. -func (in *ClusterCSIDriverSpec) DeepCopy() *ClusterCSIDriverSpec { - if in == nil { - return nil - } - out := new(ClusterCSIDriverSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterCSIDriverStatus) DeepCopyInto(out *ClusterCSIDriverStatus) { - *out = *in - in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCSIDriverStatus. -func (in *ClusterCSIDriverStatus) DeepCopy() *ClusterCSIDriverStatus { - if in == nil { - return nil - } - out := new(ClusterCSIDriverStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry. -func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry { - if in == nil { - return nil - } - out := new(ClusterNetworkEntry) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Config) DeepCopyInto(out *Config) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. -func (in *Config) DeepCopy() *Config { - if in == nil { - return nil - } - out := new(Config) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Config) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigList) DeepCopyInto(out *ConfigList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Config, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigList. -func (in *ConfigList) DeepCopy() *ConfigList { - if in == nil { - return nil - } - out := new(ConfigList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ConfigList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigSpec) DeepCopyInto(out *ConfigSpec) { - *out = *in - in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigSpec. -func (in *ConfigSpec) DeepCopy() *ConfigSpec { - if in == nil { - return nil - } - out := new(ConfigSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigStatus) DeepCopyInto(out *ConfigStatus) { - *out = *in - in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigStatus. -func (in *ConfigStatus) DeepCopy() *ConfigStatus { - if in == nil { - return nil - } - out := new(ConfigStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Console) DeepCopyInto(out *Console) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Console. -func (in *Console) DeepCopy() *Console { - if in == nil { - return nil - } - out := new(Console) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Console) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConsoleConfigRoute) DeepCopyInto(out *ConsoleConfigRoute) { - *out = *in - out.Secret = in.Secret - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleConfigRoute. -func (in *ConsoleConfigRoute) DeepCopy() *ConsoleConfigRoute { - if in == nil { - return nil - } - out := new(ConsoleConfigRoute) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConsoleCustomization) DeepCopyInto(out *ConsoleCustomization) { - *out = *in - out.CustomLogoFile = in.CustomLogoFile - in.DeveloperCatalog.DeepCopyInto(&out.DeveloperCatalog) - in.ProjectAccess.DeepCopyInto(&out.ProjectAccess) - in.QuickStarts.DeepCopyInto(&out.QuickStarts) - in.AddPage.DeepCopyInto(&out.AddPage) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleCustomization. -func (in *ConsoleCustomization) DeepCopy() *ConsoleCustomization { - if in == nil { - return nil - } - out := new(ConsoleCustomization) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConsoleList) DeepCopyInto(out *ConsoleList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Console, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleList. -func (in *ConsoleList) DeepCopy() *ConsoleList { - if in == nil { - return nil - } - out := new(ConsoleList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ConsoleList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConsoleProviders) DeepCopyInto(out *ConsoleProviders) { - *out = *in - if in.Statuspage != nil { - in, out := &in.Statuspage, &out.Statuspage - *out = new(StatuspageProvider) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleProviders. 
-func (in *ConsoleProviders) DeepCopy() *ConsoleProviders { - if in == nil { - return nil - } - out := new(ConsoleProviders) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConsoleSpec) DeepCopyInto(out *ConsoleSpec) { - *out = *in - in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) - in.Customization.DeepCopyInto(&out.Customization) - in.Providers.DeepCopyInto(&out.Providers) - out.Route = in.Route - if in.Plugins != nil { - in, out := &in.Plugins, &out.Plugins - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSpec. -func (in *ConsoleSpec) DeepCopy() *ConsoleSpec { - if in == nil { - return nil - } - out := new(ConsoleSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConsoleStatus) DeepCopyInto(out *ConsoleStatus) { - *out = *in - in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleStatus. -func (in *ConsoleStatus) DeepCopy() *ConsoleStatus { - if in == nil { - return nil - } - out := new(ConsoleStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerLoggingDestinationParameters) DeepCopyInto(out *ContainerLoggingDestinationParameters) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerLoggingDestinationParameters. -func (in *ContainerLoggingDestinationParameters) DeepCopy() *ContainerLoggingDestinationParameters { - if in == nil { - return nil - } - out := new(ContainerLoggingDestinationParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DNS) DeepCopyInto(out *DNS) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS. -func (in *DNS) DeepCopy() *DNS { - if in == nil { - return nil - } - out := new(DNS) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DNS) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DNSList) DeepCopyInto(out *DNSList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]DNS, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSList. 
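
Two slice-copy shapes recur in the functions above: the builtin copy() for slices of plain values (ConsoleSpec.Plugins) and an element-wise DeepCopyInto loop for slices whose elements hold references (the various Items fields). The sketch below shows why copy() alone would alias inner data; the local Server type only mimics the vendored one.

package main

import "fmt"

type Server struct{ Zones []string }

func (in *Server) DeepCopyInto(out *Server) {
	*out = *in
	out.Zones = append([]string(nil), in.Zones...)
}

func main() {
	in := []Server{{Zones: []string{"example.com"}}}

	// copy() duplicates the struct values but aliases their inner slices.
	shallow := make([]Server, len(in))
	copy(shallow, in)

	// The generated pattern recurses per element instead.
	deep := make([]Server, len(in))
	for i := range in {
		in[i].DeepCopyInto(&deep[i])
	}

	in[0].Zones[0] = "mutated"
	fmt.Println(shallow[0].Zones[0]) // mutated (aliased)
	fmt.Println(deep[0].Zones[0])    // example.com (independent)
}
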
-func (in *DNSList) DeepCopy() *DNSList { - if in == nil { - return nil - } - out := new(DNSList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DNSList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DNSNodePlacement) DeepCopyInto(out *DNSNodePlacement) { - *out = *in - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Tolerations != nil { - in, out := &in.Tolerations, &out.Tolerations - *out = make([]corev1.Toleration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNodePlacement. -func (in *DNSNodePlacement) DeepCopy() *DNSNodePlacement { - if in == nil { - return nil - } - out := new(DNSNodePlacement) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DNSOverTLSConfig) DeepCopyInto(out *DNSOverTLSConfig) { - *out = *in - out.CABundle = in.CABundle - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSOverTLSConfig. -func (in *DNSOverTLSConfig) DeepCopy() *DNSOverTLSConfig { - if in == nil { - return nil - } - out := new(DNSOverTLSConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DNSSpec) DeepCopyInto(out *DNSSpec) { - *out = *in - if in.Servers != nil { - in, out := &in.Servers, &out.Servers - *out = make([]Server, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.UpstreamResolvers.DeepCopyInto(&out.UpstreamResolvers) - in.NodePlacement.DeepCopyInto(&out.NodePlacement) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSSpec. -func (in *DNSSpec) DeepCopy() *DNSSpec { - if in == nil { - return nil - } - out := new(DNSSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DNSStatus) DeepCopyInto(out *DNSStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]OperatorCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSStatus. -func (in *DNSStatus) DeepCopy() *DNSStatus { - if in == nil { - return nil - } - out := new(DNSStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DNSTransportConfig) DeepCopyInto(out *DNSTransportConfig) { - *out = *in - if in.TLS != nil { - in, out := &in.TLS, &out.TLS - *out = new(DNSOverTLSConfig) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSTransportConfig. 
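
The key-by-key rebuild of NodeSelector in DNSNodePlacement above is not pedantry: Go maps are reference types, and plain assignment shares storage. A minimal, self-contained demonstration of the aliasing the generated code avoids.

package main

import "fmt"

func main() {
	in := map[string]string{"node-role.kubernetes.io/worker": ""}

	aliased := in // shares storage with in

	deep := make(map[string]string, len(in))
	for k, v := range in {
		deep[k] = v
	}

	in["zone"] = "a"
	fmt.Println(len(aliased), len(deep)) // prints "2 1": only the alias saw the write
}
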
-func (in *DNSTransportConfig) DeepCopy() *DNSTransportConfig { - if in == nil { - return nil - } - out := new(DNSTransportConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DefaultNetworkDefinition) DeepCopyInto(out *DefaultNetworkDefinition) { - *out = *in - if in.OpenShiftSDNConfig != nil { - in, out := &in.OpenShiftSDNConfig, &out.OpenShiftSDNConfig - *out = new(OpenShiftSDNConfig) - (*in).DeepCopyInto(*out) - } - if in.OVNKubernetesConfig != nil { - in, out := &in.OVNKubernetesConfig, &out.OVNKubernetesConfig - *out = new(OVNKubernetesConfig) - (*in).DeepCopyInto(*out) - } - if in.KuryrConfig != nil { - in, out := &in.KuryrConfig, &out.KuryrConfig - *out = new(KuryrConfig) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultNetworkDefinition. -func (in *DefaultNetworkDefinition) DeepCopy() *DefaultNetworkDefinition { - if in == nil { - return nil - } - out := new(DefaultNetworkDefinition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeveloperConsoleCatalogCategory) DeepCopyInto(out *DeveloperConsoleCatalogCategory) { - *out = *in - in.DeveloperConsoleCatalogCategoryMeta.DeepCopyInto(&out.DeveloperConsoleCatalogCategoryMeta) - if in.Subcategories != nil { - in, out := &in.Subcategories, &out.Subcategories - *out = make([]DeveloperConsoleCatalogCategoryMeta, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeveloperConsoleCatalogCategory. -func (in *DeveloperConsoleCatalogCategory) DeepCopy() *DeveloperConsoleCatalogCategory { - if in == nil { - return nil - } - out := new(DeveloperConsoleCatalogCategory) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeveloperConsoleCatalogCategoryMeta) DeepCopyInto(out *DeveloperConsoleCatalogCategoryMeta) { - *out = *in - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeveloperConsoleCatalogCategoryMeta. -func (in *DeveloperConsoleCatalogCategoryMeta) DeepCopy() *DeveloperConsoleCatalogCategoryMeta { - if in == nil { - return nil - } - out := new(DeveloperConsoleCatalogCategoryMeta) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeveloperConsoleCatalogCustomization) DeepCopyInto(out *DeveloperConsoleCatalogCustomization) { - *out = *in - if in.Categories != nil { - in, out := &in.Categories, &out.Categories - *out = make([]DeveloperConsoleCatalogCategory, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeveloperConsoleCatalogCustomization. 
-func (in *DeveloperConsoleCatalogCustomization) DeepCopy() *DeveloperConsoleCatalogCustomization { - if in == nil { - return nil - } - out := new(DeveloperConsoleCatalogCustomization) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EndpointPublishingStrategy) DeepCopyInto(out *EndpointPublishingStrategy) { - *out = *in - if in.LoadBalancer != nil { - in, out := &in.LoadBalancer, &out.LoadBalancer - *out = new(LoadBalancerStrategy) - (*in).DeepCopyInto(*out) - } - if in.HostNetwork != nil { - in, out := &in.HostNetwork, &out.HostNetwork - *out = new(HostNetworkStrategy) - **out = **in - } - if in.Private != nil { - in, out := &in.Private, &out.Private - *out = new(PrivateStrategy) - **out = **in - } - if in.NodePort != nil { - in, out := &in.NodePort, &out.NodePort - *out = new(NodePortStrategy) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPublishingStrategy. -func (in *EndpointPublishingStrategy) DeepCopy() *EndpointPublishingStrategy { - if in == nil { - return nil - } - out := new(EndpointPublishingStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Etcd) DeepCopyInto(out *Etcd) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Etcd. -func (in *Etcd) DeepCopy() *Etcd { - if in == nil { - return nil - } - out := new(Etcd) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Etcd) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EtcdList) DeepCopyInto(out *EtcdList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Etcd, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdList. -func (in *EtcdList) DeepCopy() *EtcdList { - if in == nil { - return nil - } - out := new(EtcdList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *EtcdList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EtcdSpec) DeepCopyInto(out *EtcdSpec) { - *out = *in - in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdSpec. 
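
EndpointPublishingStrategy above is a Kubernetes-style union: one pointer member per variant, at most one set. Upstream pairs the members with a Type discriminator; that field is not visible in this hunk, so the discriminator and the constant values below ("LoadBalancerService", "HostNetwork") are stated as assumptions. A sketch of the usual consumption pattern with local stand-in types.

package main

import "fmt"

// Local stand-ins for two of the union members seen above.
type LoadBalancerStrategy struct{ Scope string }
type HostNetworkStrategy struct{ HTTPPort int32 }

type EndpointPublishingStrategy struct {
	Type         string // assumed discriminator, per the usual union convention
	LoadBalancer *LoadBalancerStrategy
	HostNetwork  *HostNetworkStrategy
}

// describe switches on the discriminator, then dereferences exactly
// one member; the others are expected to be nil.
func describe(s *EndpointPublishingStrategy) string {
	switch s.Type {
	case "LoadBalancerService":
		return fmt.Sprintf("load balancer, scope=%s", s.LoadBalancer.Scope)
	case "HostNetwork":
		return fmt.Sprintf("host network, port=%d", s.HostNetwork.HTTPPort)
	default:
		return "unknown strategy"
	}
}

func main() {
	s := &EndpointPublishingStrategy{
		Type:         "LoadBalancerService",
		LoadBalancer: &LoadBalancerStrategy{Scope: "External"},
	}
	fmt.Println(describe(s))
}
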
-func (in *EtcdSpec) DeepCopy() *EtcdSpec { - if in == nil { - return nil - } - out := new(EtcdSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EtcdStatus) DeepCopyInto(out *EtcdStatus) { - *out = *in - in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdStatus. -func (in *EtcdStatus) DeepCopy() *EtcdStatus { - if in == nil { - return nil - } - out := new(EtcdStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExportNetworkFlows) DeepCopyInto(out *ExportNetworkFlows) { - *out = *in - if in.NetFlow != nil { - in, out := &in.NetFlow, &out.NetFlow - *out = new(NetFlowConfig) - (*in).DeepCopyInto(*out) - } - if in.SFlow != nil { - in, out := &in.SFlow, &out.SFlow - *out = new(SFlowConfig) - (*in).DeepCopyInto(*out) - } - if in.IPFIX != nil { - in, out := &in.IPFIX, &out.IPFIX - *out = new(IPFIXConfig) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportNetworkFlows. -func (in *ExportNetworkFlows) DeepCopy() *ExportNetworkFlows { - if in == nil { - return nil - } - out := new(ExportNetworkFlows) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ForwardPlugin) DeepCopyInto(out *ForwardPlugin) { - *out = *in - if in.Upstreams != nil { - in, out := &in.Upstreams, &out.Upstreams - *out = make([]string, len(*in)) - copy(*out, *in) - } - in.TransportConfig.DeepCopyInto(&out.TransportConfig) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardPlugin. -func (in *ForwardPlugin) DeepCopy() *ForwardPlugin { - if in == nil { - return nil - } - out := new(ForwardPlugin) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPLoadBalancerParameters) DeepCopyInto(out *GCPLoadBalancerParameters) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPLoadBalancerParameters. -func (in *GCPLoadBalancerParameters) DeepCopy() *GCPLoadBalancerParameters { - if in == nil { - return nil - } - out := new(GCPLoadBalancerParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GatewayConfig) DeepCopyInto(out *GatewayConfig) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayConfig. -func (in *GatewayConfig) DeepCopy() *GatewayConfig { - if in == nil { - return nil - } - out := new(GatewayConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GenerationStatus) DeepCopyInto(out *GenerationStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenerationStatus. 
-func (in *GenerationStatus) DeepCopy() *GenerationStatus { - if in == nil { - return nil - } - out := new(GenerationStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HTTPCompressionPolicy) DeepCopyInto(out *HTTPCompressionPolicy) { - *out = *in - if in.MimeTypes != nil { - in, out := &in.MimeTypes, &out.MimeTypes - *out = make([]CompressionMIMEType, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPCompressionPolicy. -func (in *HTTPCompressionPolicy) DeepCopy() *HTTPCompressionPolicy { - if in == nil { - return nil - } - out := new(HTTPCompressionPolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostNetworkStrategy) DeepCopyInto(out *HostNetworkStrategy) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostNetworkStrategy. -func (in *HostNetworkStrategy) DeepCopy() *HostNetworkStrategy { - if in == nil { - return nil - } - out := new(HostNetworkStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HybridOverlayConfig) DeepCopyInto(out *HybridOverlayConfig) { - *out = *in - if in.HybridClusterNetwork != nil { - in, out := &in.HybridClusterNetwork, &out.HybridClusterNetwork - *out = make([]ClusterNetworkEntry, len(*in)) - copy(*out, *in) - } - if in.HybridOverlayVXLANPort != nil { - in, out := &in.HybridOverlayVXLANPort, &out.HybridOverlayVXLANPort - *out = new(uint32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HybridOverlayConfig. -func (in *HybridOverlayConfig) DeepCopy() *HybridOverlayConfig { - if in == nil { - return nil - } - out := new(HybridOverlayConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IPAMConfig) DeepCopyInto(out *IPAMConfig) { - *out = *in - if in.StaticIPAMConfig != nil { - in, out := &in.StaticIPAMConfig, &out.StaticIPAMConfig - *out = new(StaticIPAMConfig) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMConfig. -func (in *IPAMConfig) DeepCopy() *IPAMConfig { - if in == nil { - return nil - } - out := new(IPAMConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IPFIXConfig) DeepCopyInto(out *IPFIXConfig) { - *out = *in - if in.Collectors != nil { - in, out := &in.Collectors, &out.Collectors - *out = make([]IPPort, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPFIXConfig. -func (in *IPFIXConfig) DeepCopy() *IPFIXConfig { - if in == nil { - return nil - } - out := new(IPFIXConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
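
HybridOverlayVXLANPort above is a *uint32 rather than a uint32 so that unset (nil) stays distinguishable from an explicit zero, which is also why the generated copy allocates with new(uint32). A small sketch of reading such a field; effectivePort is hypothetical, and 4789 is chosen only because it is the conventional VXLAN port.

package main

import "fmt"

// effectivePort resolves an optional pointer scalar: nil means "use
// the default", while a non-nil pointer is an explicit setting.
func effectivePort(p *uint32) uint32 {
	if p == nil {
		return 4789 // hypothetical default, for illustration only
	}
	return *p
}

func main() {
	var unset *uint32
	explicit := new(uint32)
	*explicit = 9789

	fmt.Println(effectivePort(unset), effectivePort(explicit)) // 4789 9789
}
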
-func (in *IPsecConfig) DeepCopyInto(out *IPsecConfig) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPsecConfig. -func (in *IPsecConfig) DeepCopy() *IPsecConfig { - if in == nil { - return nil - } - out := new(IPsecConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressController) DeepCopyInto(out *IngressController) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressController. -func (in *IngressController) DeepCopy() *IngressController { - if in == nil { - return nil - } - out := new(IngressController) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *IngressController) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressControllerCaptureHTTPCookie) DeepCopyInto(out *IngressControllerCaptureHTTPCookie) { - *out = *in - out.IngressControllerCaptureHTTPCookieUnion = in.IngressControllerCaptureHTTPCookieUnion - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerCaptureHTTPCookie. -func (in *IngressControllerCaptureHTTPCookie) DeepCopy() *IngressControllerCaptureHTTPCookie { - if in == nil { - return nil - } - out := new(IngressControllerCaptureHTTPCookie) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressControllerCaptureHTTPCookieUnion) DeepCopyInto(out *IngressControllerCaptureHTTPCookieUnion) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerCaptureHTTPCookieUnion. -func (in *IngressControllerCaptureHTTPCookieUnion) DeepCopy() *IngressControllerCaptureHTTPCookieUnion { - if in == nil { - return nil - } - out := new(IngressControllerCaptureHTTPCookieUnion) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressControllerCaptureHTTPHeader) DeepCopyInto(out *IngressControllerCaptureHTTPHeader) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerCaptureHTTPHeader. -func (in *IngressControllerCaptureHTTPHeader) DeepCopy() *IngressControllerCaptureHTTPHeader { - if in == nil { - return nil - } - out := new(IngressControllerCaptureHTTPHeader) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *IngressControllerCaptureHTTPHeaders) DeepCopyInto(out *IngressControllerCaptureHTTPHeaders) { - *out = *in - if in.Request != nil { - in, out := &in.Request, &out.Request - *out = make([]IngressControllerCaptureHTTPHeader, len(*in)) - copy(*out, *in) - } - if in.Response != nil { - in, out := &in.Response, &out.Response - *out = make([]IngressControllerCaptureHTTPHeader, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerCaptureHTTPHeaders. -func (in *IngressControllerCaptureHTTPHeaders) DeepCopy() *IngressControllerCaptureHTTPHeaders { - if in == nil { - return nil - } - out := new(IngressControllerCaptureHTTPHeaders) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressControllerHTTPHeaders) DeepCopyInto(out *IngressControllerHTTPHeaders) { - *out = *in - out.UniqueId = in.UniqueId - if in.HeaderNameCaseAdjustments != nil { - in, out := &in.HeaderNameCaseAdjustments, &out.HeaderNameCaseAdjustments - *out = make([]IngressControllerHTTPHeaderNameCaseAdjustment, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerHTTPHeaders. -func (in *IngressControllerHTTPHeaders) DeepCopy() *IngressControllerHTTPHeaders { - if in == nil { - return nil - } - out := new(IngressControllerHTTPHeaders) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressControllerHTTPUniqueIdHeaderPolicy) DeepCopyInto(out *IngressControllerHTTPUniqueIdHeaderPolicy) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerHTTPUniqueIdHeaderPolicy. -func (in *IngressControllerHTTPUniqueIdHeaderPolicy) DeepCopy() *IngressControllerHTTPUniqueIdHeaderPolicy { - if in == nil { - return nil - } - out := new(IngressControllerHTTPUniqueIdHeaderPolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressControllerList) DeepCopyInto(out *IngressControllerList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]IngressController, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerList. -func (in *IngressControllerList) DeepCopy() *IngressControllerList { - if in == nil { - return nil - } - out := new(IngressControllerList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *IngressControllerList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *IngressControllerLogging) DeepCopyInto(out *IngressControllerLogging) { - *out = *in - if in.Access != nil { - in, out := &in.Access, &out.Access - *out = new(AccessLogging) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerLogging. -func (in *IngressControllerLogging) DeepCopy() *IngressControllerLogging { - if in == nil { - return nil - } - out := new(IngressControllerLogging) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressControllerSpec) DeepCopyInto(out *IngressControllerSpec) { - *out = *in - out.HttpErrorCodePages = in.HttpErrorCodePages - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - if in.EndpointPublishingStrategy != nil { - in, out := &in.EndpointPublishingStrategy, &out.EndpointPublishingStrategy - *out = new(EndpointPublishingStrategy) - (*in).DeepCopyInto(*out) - } - if in.DefaultCertificate != nil { - in, out := &in.DefaultCertificate, &out.DefaultCertificate - *out = new(corev1.LocalObjectReference) - **out = **in - } - if in.NamespaceSelector != nil { - in, out := &in.NamespaceSelector, &out.NamespaceSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.RouteSelector != nil { - in, out := &in.RouteSelector, &out.RouteSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.NodePlacement != nil { - in, out := &in.NodePlacement, &out.NodePlacement - *out = new(NodePlacement) - (*in).DeepCopyInto(*out) - } - if in.TLSSecurityProfile != nil { - in, out := &in.TLSSecurityProfile, &out.TLSSecurityProfile - *out = new(configv1.TLSSecurityProfile) - (*in).DeepCopyInto(*out) - } - in.ClientTLS.DeepCopyInto(&out.ClientTLS) - if in.RouteAdmission != nil { - in, out := &in.RouteAdmission, &out.RouteAdmission - *out = new(RouteAdmissionPolicy) - **out = **in - } - if in.Logging != nil { - in, out := &in.Logging, &out.Logging - *out = new(IngressControllerLogging) - (*in).DeepCopyInto(*out) - } - if in.HTTPHeaders != nil { - in, out := &in.HTTPHeaders, &out.HTTPHeaders - *out = new(IngressControllerHTTPHeaders) - (*in).DeepCopyInto(*out) - } - in.TuningOptions.DeepCopyInto(&out.TuningOptions) - in.UnsupportedConfigOverrides.DeepCopyInto(&out.UnsupportedConfigOverrides) - in.HTTPCompression.DeepCopyInto(&out.HTTPCompression) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerSpec. -func (in *IngressControllerSpec) DeepCopy() *IngressControllerSpec { - if in == nil { - return nil - } - out := new(IngressControllerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *IngressControllerStatus) DeepCopyInto(out *IngressControllerStatus) { - *out = *in - if in.EndpointPublishingStrategy != nil { - in, out := &in.EndpointPublishingStrategy, &out.EndpointPublishingStrategy - *out = new(EndpointPublishingStrategy) - (*in).DeepCopyInto(*out) - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]OperatorCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.TLSProfile != nil { - in, out := &in.TLSProfile, &out.TLSProfile - *out = new(configv1.TLSProfileSpec) - (*in).DeepCopyInto(*out) - } - if in.NamespaceSelector != nil { - in, out := &in.NamespaceSelector, &out.NamespaceSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.RouteSelector != nil { - in, out := &in.RouteSelector, &out.RouteSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerStatus. -func (in *IngressControllerStatus) DeepCopy() *IngressControllerStatus { - if in == nil { - return nil - } - out := new(IngressControllerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressControllerTuningOptions) DeepCopyInto(out *IngressControllerTuningOptions) { - *out = *in - if in.ClientTimeout != nil { - in, out := &in.ClientTimeout, &out.ClientTimeout - *out = new(metav1.Duration) - **out = **in - } - if in.ClientFinTimeout != nil { - in, out := &in.ClientFinTimeout, &out.ClientFinTimeout - *out = new(metav1.Duration) - **out = **in - } - if in.ServerTimeout != nil { - in, out := &in.ServerTimeout, &out.ServerTimeout - *out = new(metav1.Duration) - **out = **in - } - if in.ServerFinTimeout != nil { - in, out := &in.ServerFinTimeout, &out.ServerFinTimeout - *out = new(metav1.Duration) - **out = **in - } - if in.TunnelTimeout != nil { - in, out := &in.TunnelTimeout, &out.TunnelTimeout - *out = new(metav1.Duration) - **out = **in - } - if in.TLSInspectDelay != nil { - in, out := &in.TLSInspectDelay, &out.TLSInspectDelay - *out = new(metav1.Duration) - **out = **in - } - if in.HealthCheckInterval != nil { - in, out := &in.HealthCheckInterval, &out.HealthCheckInterval - *out = new(metav1.Duration) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerTuningOptions. -func (in *IngressControllerTuningOptions) DeepCopy() *IngressControllerTuningOptions { - if in == nil { - return nil - } - out := new(IngressControllerTuningOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeAPIServer) DeepCopyInto(out *KubeAPIServer) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServer. -func (in *KubeAPIServer) DeepCopy() *KubeAPIServer { - if in == nil { - return nil - } - out := new(KubeAPIServer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *KubeAPIServer) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeAPIServerList) DeepCopyInto(out *KubeAPIServerList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KubeAPIServer, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerList. -func (in *KubeAPIServerList) DeepCopy() *KubeAPIServerList { - if in == nil { - return nil - } - out := new(KubeAPIServerList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *KubeAPIServerList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeAPIServerSpec) DeepCopyInto(out *KubeAPIServerSpec) { - *out = *in - in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerSpec. -func (in *KubeAPIServerSpec) DeepCopy() *KubeAPIServerSpec { - if in == nil { - return nil - } - out := new(KubeAPIServerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeAPIServerStatus) DeepCopyInto(out *KubeAPIServerStatus) { - *out = *in - in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerStatus. -func (in *KubeAPIServerStatus) DeepCopy() *KubeAPIServerStatus { - if in == nil { - return nil - } - out := new(KubeAPIServerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeControllerManager) DeepCopyInto(out *KubeControllerManager) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManager. -func (in *KubeControllerManager) DeepCopy() *KubeControllerManager { - if in == nil { - return nil - } - out := new(KubeControllerManager) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *KubeControllerManager) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *KubeControllerManagerList) DeepCopyInto(out *KubeControllerManagerList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KubeControllerManager, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerList. -func (in *KubeControllerManagerList) DeepCopy() *KubeControllerManagerList { - if in == nil { - return nil - } - out := new(KubeControllerManagerList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *KubeControllerManagerList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeControllerManagerSpec) DeepCopyInto(out *KubeControllerManagerSpec) { - *out = *in - in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerSpec. -func (in *KubeControllerManagerSpec) DeepCopy() *KubeControllerManagerSpec { - if in == nil { - return nil - } - out := new(KubeControllerManagerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeControllerManagerStatus) DeepCopyInto(out *KubeControllerManagerStatus) { - *out = *in - in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerStatus. -func (in *KubeControllerManagerStatus) DeepCopy() *KubeControllerManagerStatus { - if in == nil { - return nil - } - out := new(KubeControllerManagerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeScheduler) DeepCopyInto(out *KubeScheduler) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeScheduler. -func (in *KubeScheduler) DeepCopy() *KubeScheduler { - if in == nil { - return nil - } - out := new(KubeScheduler) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *KubeScheduler) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeSchedulerList) DeepCopyInto(out *KubeSchedulerList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KubeScheduler, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerList. 
-func (in *KubeSchedulerList) DeepCopy() *KubeSchedulerList { - if in == nil { - return nil - } - out := new(KubeSchedulerList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *KubeSchedulerList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeSchedulerSpec) DeepCopyInto(out *KubeSchedulerSpec) { - *out = *in - in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerSpec. -func (in *KubeSchedulerSpec) DeepCopy() *KubeSchedulerSpec { - if in == nil { - return nil - } - out := new(KubeSchedulerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeSchedulerStatus) DeepCopyInto(out *KubeSchedulerStatus) { - *out = *in - in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerStatus. -func (in *KubeSchedulerStatus) DeepCopy() *KubeSchedulerStatus { - if in == nil { - return nil - } - out := new(KubeSchedulerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeStorageVersionMigrator) DeepCopyInto(out *KubeStorageVersionMigrator) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigrator. -func (in *KubeStorageVersionMigrator) DeepCopy() *KubeStorageVersionMigrator { - if in == nil { - return nil - } - out := new(KubeStorageVersionMigrator) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *KubeStorageVersionMigrator) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeStorageVersionMigratorList) DeepCopyInto(out *KubeStorageVersionMigratorList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KubeStorageVersionMigrator, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigratorList. -func (in *KubeStorageVersionMigratorList) DeepCopy() *KubeStorageVersionMigratorList { - if in == nil { - return nil - } - out := new(KubeStorageVersionMigratorList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *KubeStorageVersionMigratorList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeStorageVersionMigratorSpec) DeepCopyInto(out *KubeStorageVersionMigratorSpec) { - *out = *in - in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigratorSpec. -func (in *KubeStorageVersionMigratorSpec) DeepCopy() *KubeStorageVersionMigratorSpec { - if in == nil { - return nil - } - out := new(KubeStorageVersionMigratorSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeStorageVersionMigratorStatus) DeepCopyInto(out *KubeStorageVersionMigratorStatus) { - *out = *in - in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigratorStatus. -func (in *KubeStorageVersionMigratorStatus) DeepCopy() *KubeStorageVersionMigratorStatus { - if in == nil { - return nil - } - out := new(KubeStorageVersionMigratorStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KuryrConfig) DeepCopyInto(out *KuryrConfig) { - *out = *in - if in.DaemonProbesPort != nil { - in, out := &in.DaemonProbesPort, &out.DaemonProbesPort - *out = new(uint32) - **out = **in - } - if in.ControllerProbesPort != nil { - in, out := &in.ControllerProbesPort, &out.ControllerProbesPort - *out = new(uint32) - **out = **in - } - if in.PoolBatchPorts != nil { - in, out := &in.PoolBatchPorts, &out.PoolBatchPorts - *out = new(uint) - **out = **in - } - if in.MTU != nil { - in, out := &in.MTU, &out.MTU - *out = new(uint32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KuryrConfig. -func (in *KuryrConfig) DeepCopy() *KuryrConfig { - if in == nil { - return nil - } - out := new(KuryrConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LoadBalancerStrategy) DeepCopyInto(out *LoadBalancerStrategy) { - *out = *in - if in.ProviderParameters != nil { - in, out := &in.ProviderParameters, &out.ProviderParameters - *out = new(ProviderLoadBalancerParameters) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerStrategy. -func (in *LoadBalancerStrategy) DeepCopy() *LoadBalancerStrategy { - if in == nil { - return nil - } - out := new(LoadBalancerStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *LoggingDestination) DeepCopyInto(out *LoggingDestination) { - *out = *in - if in.Syslog != nil { - in, out := &in.Syslog, &out.Syslog - *out = new(SyslogLoggingDestinationParameters) - **out = **in - } - if in.Container != nil { - in, out := &in.Container, &out.Container - *out = new(ContainerLoggingDestinationParameters) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingDestination. -func (in *LoggingDestination) DeepCopy() *LoggingDestination { - if in == nil { - return nil - } - out := new(LoggingDestination) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MTUMigration) DeepCopyInto(out *MTUMigration) { - *out = *in - if in.Network != nil { - in, out := &in.Network, &out.Network - *out = new(MTUMigrationValues) - (*in).DeepCopyInto(*out) - } - if in.Machine != nil { - in, out := &in.Machine, &out.Machine - *out = new(MTUMigrationValues) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MTUMigration. -func (in *MTUMigration) DeepCopy() *MTUMigration { - if in == nil { - return nil - } - out := new(MTUMigration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MTUMigrationValues) DeepCopyInto(out *MTUMigrationValues) { - *out = *in - if in.To != nil { - in, out := &in.To, &out.To - *out = new(uint32) - **out = **in - } - if in.From != nil { - in, out := &in.From, &out.From - *out = new(uint32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MTUMigrationValues. -func (in *MTUMigrationValues) DeepCopy() *MTUMigrationValues { - if in == nil { - return nil - } - out := new(MTUMigrationValues) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MyOperatorResource) DeepCopyInto(out *MyOperatorResource) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MyOperatorResource. -func (in *MyOperatorResource) DeepCopy() *MyOperatorResource { - if in == nil { - return nil - } - out := new(MyOperatorResource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MyOperatorResourceSpec) DeepCopyInto(out *MyOperatorResourceSpec) { - *out = *in - in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MyOperatorResourceSpec. -func (in *MyOperatorResourceSpec) DeepCopy() *MyOperatorResourceSpec { - if in == nil { - return nil - } - out := new(MyOperatorResourceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MyOperatorResourceStatus) DeepCopyInto(out *MyOperatorResourceStatus) { - *out = *in - in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MyOperatorResourceStatus. -func (in *MyOperatorResourceStatus) DeepCopy() *MyOperatorResourceStatus { - if in == nil { - return nil - } - out := new(MyOperatorResourceStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetFlowConfig) DeepCopyInto(out *NetFlowConfig) { - *out = *in - if in.Collectors != nil { - in, out := &in.Collectors, &out.Collectors - *out = make([]IPPort, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetFlowConfig. -func (in *NetFlowConfig) DeepCopy() *NetFlowConfig { - if in == nil { - return nil - } - out := new(NetFlowConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Network) DeepCopyInto(out *Network) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network. -func (in *Network) DeepCopy() *Network { - if in == nil { - return nil - } - out := new(Network) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Network) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkList) DeepCopyInto(out *NetworkList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Network, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkList. -func (in *NetworkList) DeepCopy() *NetworkList { - if in == nil { - return nil - } - out := new(NetworkList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NetworkList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkMigration) DeepCopyInto(out *NetworkMigration) { - *out = *in - if in.MTU != nil { - in, out := &in.MTU, &out.MTU - *out = new(MTUMigration) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkMigration. -func (in *NetworkMigration) DeepCopy() *NetworkMigration { - if in == nil { - return nil - } - out := new(NetworkMigration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { - *out = *in - in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) - if in.ClusterNetwork != nil { - in, out := &in.ClusterNetwork, &out.ClusterNetwork - *out = make([]ClusterNetworkEntry, len(*in)) - copy(*out, *in) - } - if in.ServiceNetwork != nil { - in, out := &in.ServiceNetwork, &out.ServiceNetwork - *out = make([]string, len(*in)) - copy(*out, *in) - } - in.DefaultNetwork.DeepCopyInto(&out.DefaultNetwork) - if in.AdditionalNetworks != nil { - in, out := &in.AdditionalNetworks, &out.AdditionalNetworks - *out = make([]AdditionalNetworkDefinition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.DisableMultiNetwork != nil { - in, out := &in.DisableMultiNetwork, &out.DisableMultiNetwork - *out = new(bool) - **out = **in - } - if in.UseMultiNetworkPolicy != nil { - in, out := &in.UseMultiNetworkPolicy, &out.UseMultiNetworkPolicy - *out = new(bool) - **out = **in - } - if in.DeployKubeProxy != nil { - in, out := &in.DeployKubeProxy, &out.DeployKubeProxy - *out = new(bool) - **out = **in - } - if in.KubeProxyConfig != nil { - in, out := &in.KubeProxyConfig, &out.KubeProxyConfig - *out = new(ProxyConfig) - (*in).DeepCopyInto(*out) - } - if in.ExportNetworkFlows != nil { - in, out := &in.ExportNetworkFlows, &out.ExportNetworkFlows - *out = new(ExportNetworkFlows) - (*in).DeepCopyInto(*out) - } - if in.Migration != nil { - in, out := &in.Migration, &out.Migration - *out = new(NetworkMigration) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec. -func (in *NetworkSpec) DeepCopy() *NetworkSpec { - if in == nil { - return nil - } - out := new(NetworkSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkStatus) DeepCopyInto(out *NetworkStatus) { - *out = *in - in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkStatus. -func (in *NetworkStatus) DeepCopy() *NetworkStatus { - if in == nil { - return nil - } - out := new(NetworkStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodePlacement) DeepCopyInto(out *NodePlacement) { - *out = *in - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.Tolerations != nil { - in, out := &in.Tolerations, &out.Tolerations - *out = make([]corev1.Toleration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePlacement. -func (in *NodePlacement) DeepCopy() *NodePlacement { - if in == nil { - return nil - } - out := new(NodePlacement) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodePortStrategy) DeepCopyInto(out *NodePortStrategy) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePortStrategy. 
-func (in *NodePortStrategy) DeepCopy() *NodePortStrategy { - if in == nil { - return nil - } - out := new(NodePortStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { - *out = *in - if in.LastFailedTime != nil { - in, out := &in.LastFailedTime, &out.LastFailedTime - *out = (*in).DeepCopy() - } - if in.LastFailedRevisionErrors != nil { - in, out := &in.LastFailedRevisionErrors, &out.LastFailedRevisionErrors - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus. -func (in *NodeStatus) DeepCopy() *NodeStatus { - if in == nil { - return nil - } - out := new(NodeStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OAuthAPIServerStatus) DeepCopyInto(out *OAuthAPIServerStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthAPIServerStatus. -func (in *OAuthAPIServerStatus) DeepCopy() *OAuthAPIServerStatus { - if in == nil { - return nil - } - out := new(OAuthAPIServerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OVNKubernetesConfig) DeepCopyInto(out *OVNKubernetesConfig) { - *out = *in - if in.MTU != nil { - in, out := &in.MTU, &out.MTU - *out = new(uint32) - **out = **in - } - if in.GenevePort != nil { - in, out := &in.GenevePort, &out.GenevePort - *out = new(uint32) - **out = **in - } - if in.HybridOverlayConfig != nil { - in, out := &in.HybridOverlayConfig, &out.HybridOverlayConfig - *out = new(HybridOverlayConfig) - (*in).DeepCopyInto(*out) - } - if in.IPsecConfig != nil { - in, out := &in.IPsecConfig, &out.IPsecConfig - *out = new(IPsecConfig) - **out = **in - } - if in.PolicyAuditConfig != nil { - in, out := &in.PolicyAuditConfig, &out.PolicyAuditConfig - *out = new(PolicyAuditConfig) - (*in).DeepCopyInto(*out) - } - if in.GatewayConfig != nil { - in, out := &in.GatewayConfig, &out.GatewayConfig - *out = new(GatewayConfig) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OVNKubernetesConfig. -func (in *OVNKubernetesConfig) DeepCopy() *OVNKubernetesConfig { - if in == nil { - return nil - } - out := new(OVNKubernetesConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OpenShiftAPIServer) DeepCopyInto(out *OpenShiftAPIServer) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServer. -func (in *OpenShiftAPIServer) DeepCopy() *OpenShiftAPIServer { - if in == nil { - return nil - } - out := new(OpenShiftAPIServer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *OpenShiftAPIServer) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OpenShiftAPIServerList) DeepCopyInto(out *OpenShiftAPIServerList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]OpenShiftAPIServer, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServerList. -func (in *OpenShiftAPIServerList) DeepCopy() *OpenShiftAPIServerList { - if in == nil { - return nil - } - out := new(OpenShiftAPIServerList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *OpenShiftAPIServerList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OpenShiftAPIServerSpec) DeepCopyInto(out *OpenShiftAPIServerSpec) { - *out = *in - in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServerSpec. -func (in *OpenShiftAPIServerSpec) DeepCopy() *OpenShiftAPIServerSpec { - if in == nil { - return nil - } - out := new(OpenShiftAPIServerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OpenShiftAPIServerStatus) DeepCopyInto(out *OpenShiftAPIServerStatus) { - *out = *in - in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServerStatus. -func (in *OpenShiftAPIServerStatus) DeepCopy() *OpenShiftAPIServerStatus { - if in == nil { - return nil - } - out := new(OpenShiftAPIServerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OpenShiftControllerManager) DeepCopyInto(out *OpenShiftControllerManager) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManager. -func (in *OpenShiftControllerManager) DeepCopy() *OpenShiftControllerManager { - if in == nil { - return nil - } - out := new(OpenShiftControllerManager) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *OpenShiftControllerManager) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *OpenShiftControllerManagerList) DeepCopyInto(out *OpenShiftControllerManagerList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]OpenShiftControllerManager, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManagerList. -func (in *OpenShiftControllerManagerList) DeepCopy() *OpenShiftControllerManagerList { - if in == nil { - return nil - } - out := new(OpenShiftControllerManagerList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *OpenShiftControllerManagerList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OpenShiftControllerManagerSpec) DeepCopyInto(out *OpenShiftControllerManagerSpec) { - *out = *in - in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManagerSpec. -func (in *OpenShiftControllerManagerSpec) DeepCopy() *OpenShiftControllerManagerSpec { - if in == nil { - return nil - } - out := new(OpenShiftControllerManagerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OpenShiftControllerManagerStatus) DeepCopyInto(out *OpenShiftControllerManagerStatus) { - *out = *in - in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManagerStatus. -func (in *OpenShiftControllerManagerStatus) DeepCopy() *OpenShiftControllerManagerStatus { - if in == nil { - return nil - } - out := new(OpenShiftControllerManagerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OpenShiftSDNConfig) DeepCopyInto(out *OpenShiftSDNConfig) { - *out = *in - if in.VXLANPort != nil { - in, out := &in.VXLANPort, &out.VXLANPort - *out = new(uint32) - **out = **in - } - if in.MTU != nil { - in, out := &in.MTU, &out.MTU - *out = new(uint32) - **out = **in - } - if in.UseExternalOpenvswitch != nil { - in, out := &in.UseExternalOpenvswitch, &out.UseExternalOpenvswitch - *out = new(bool) - **out = **in - } - if in.EnableUnidling != nil { - in, out := &in.EnableUnidling, &out.EnableUnidling - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftSDNConfig. -func (in *OpenShiftSDNConfig) DeepCopy() *OpenShiftSDNConfig { - if in == nil { - return nil - } - out := new(OpenShiftSDNConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *OperatorCondition) DeepCopyInto(out *OperatorCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorCondition. -func (in *OperatorCondition) DeepCopy() *OperatorCondition { - if in == nil { - return nil - } - out := new(OperatorCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OperatorSpec) DeepCopyInto(out *OperatorSpec) { - *out = *in - in.UnsupportedConfigOverrides.DeepCopyInto(&out.UnsupportedConfigOverrides) - in.ObservedConfig.DeepCopyInto(&out.ObservedConfig) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorSpec. -func (in *OperatorSpec) DeepCopy() *OperatorSpec { - if in == nil { - return nil - } - out := new(OperatorSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OperatorStatus) DeepCopyInto(out *OperatorStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]OperatorCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Generations != nil { - in, out := &in.Generations, &out.Generations - *out = make([]GenerationStatus, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorStatus. -func (in *OperatorStatus) DeepCopy() *OperatorStatus { - if in == nil { - return nil - } - out := new(OperatorStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PolicyAuditConfig) DeepCopyInto(out *PolicyAuditConfig) { - *out = *in - if in.RateLimit != nil { - in, out := &in.RateLimit, &out.RateLimit - *out = new(uint32) - **out = **in - } - if in.MaxFileSize != nil { - in, out := &in.MaxFileSize, &out.MaxFileSize - *out = new(uint32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyAuditConfig. -func (in *PolicyAuditConfig) DeepCopy() *PolicyAuditConfig { - if in == nil { - return nil - } - out := new(PolicyAuditConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PrivateStrategy) DeepCopyInto(out *PrivateStrategy) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateStrategy. -func (in *PrivateStrategy) DeepCopy() *PrivateStrategy { - if in == nil { - return nil - } - out := new(PrivateStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProjectAccess) DeepCopyInto(out *ProjectAccess) { - *out = *in - if in.AvailableClusterRoles != nil { - in, out := &in.AvailableClusterRoles, &out.AvailableClusterRoles - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectAccess. 
-func (in *ProjectAccess) DeepCopy() *ProjectAccess { - if in == nil { - return nil - } - out := new(ProjectAccess) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProviderLoadBalancerParameters) DeepCopyInto(out *ProviderLoadBalancerParameters) { - *out = *in - if in.AWS != nil { - in, out := &in.AWS, &out.AWS - *out = new(AWSLoadBalancerParameters) - (*in).DeepCopyInto(*out) - } - if in.GCP != nil { - in, out := &in.GCP, &out.GCP - *out = new(GCPLoadBalancerParameters) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderLoadBalancerParameters. -func (in *ProviderLoadBalancerParameters) DeepCopy() *ProviderLoadBalancerParameters { - if in == nil { - return nil - } - out := new(ProviderLoadBalancerParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in ProxyArgumentList) DeepCopyInto(out *ProxyArgumentList) { - { - in := &in - *out = make(ProxyArgumentList, len(*in)) - copy(*out, *in) - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyArgumentList. -func (in ProxyArgumentList) DeepCopy() ProxyArgumentList { - if in == nil { - return nil - } - out := new(ProxyArgumentList) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) { - *out = *in - if in.ProxyArguments != nil { - in, out := &in.ProxyArguments, &out.ProxyArguments - *out = make(map[string]ProxyArgumentList, len(*in)) - for key, val := range *in { - var outVal []string - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = make(ProxyArgumentList, len(*in)) - copy(*out, *in) - } - (*out)[key] = outVal - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig. -func (in *ProxyConfig) DeepCopy() *ProxyConfig { - if in == nil { - return nil - } - out := new(ProxyConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *QuickStarts) DeepCopyInto(out *QuickStarts) { - *out = *in - if in.Disabled != nil { - in, out := &in.Disabled, &out.Disabled - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuickStarts. -func (in *QuickStarts) DeepCopy() *QuickStarts { - if in == nil { - return nil - } - out := new(QuickStarts) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RouteAdmissionPolicy) DeepCopyInto(out *RouteAdmissionPolicy) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteAdmissionPolicy. -func (in *RouteAdmissionPolicy) DeepCopy() *RouteAdmissionPolicy { - if in == nil { - return nil - } - out := new(RouteAdmissionPolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *SFlowConfig) DeepCopyInto(out *SFlowConfig) { - *out = *in - if in.Collectors != nil { - in, out := &in.Collectors, &out.Collectors - *out = make([]IPPort, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SFlowConfig. -func (in *SFlowConfig) DeepCopy() *SFlowConfig { - if in == nil { - return nil - } - out := new(SFlowConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Server) DeepCopyInto(out *Server) { - *out = *in - if in.Zones != nil { - in, out := &in.Zones, &out.Zones - *out = make([]string, len(*in)) - copy(*out, *in) - } - in.ForwardPlugin.DeepCopyInto(&out.ForwardPlugin) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Server. -func (in *Server) DeepCopy() *Server { - if in == nil { - return nil - } - out := new(Server) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceCA) DeepCopyInto(out *ServiceCA) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCA. -func (in *ServiceCA) DeepCopy() *ServiceCA { - if in == nil { - return nil - } - out := new(ServiceCA) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceCA) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceCAList) DeepCopyInto(out *ServiceCAList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ServiceCA, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCAList. -func (in *ServiceCAList) DeepCopy() *ServiceCAList { - if in == nil { - return nil - } - out := new(ServiceCAList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceCAList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceCASpec) DeepCopyInto(out *ServiceCASpec) { - *out = *in - in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCASpec. -func (in *ServiceCASpec) DeepCopy() *ServiceCASpec { - if in == nil { - return nil - } - out := new(ServiceCASpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ServiceCAStatus) DeepCopyInto(out *ServiceCAStatus) { - *out = *in - in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCAStatus. -func (in *ServiceCAStatus) DeepCopy() *ServiceCAStatus { - if in == nil { - return nil - } - out := new(ServiceCAStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceCatalogAPIServer) DeepCopyInto(out *ServiceCatalogAPIServer) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServer. -func (in *ServiceCatalogAPIServer) DeepCopy() *ServiceCatalogAPIServer { - if in == nil { - return nil - } - out := new(ServiceCatalogAPIServer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceCatalogAPIServer) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceCatalogAPIServerList) DeepCopyInto(out *ServiceCatalogAPIServerList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ServiceCatalogAPIServer, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServerList. -func (in *ServiceCatalogAPIServerList) DeepCopy() *ServiceCatalogAPIServerList { - if in == nil { - return nil - } - out := new(ServiceCatalogAPIServerList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceCatalogAPIServerList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceCatalogAPIServerSpec) DeepCopyInto(out *ServiceCatalogAPIServerSpec) { - *out = *in - in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServerSpec. -func (in *ServiceCatalogAPIServerSpec) DeepCopy() *ServiceCatalogAPIServerSpec { - if in == nil { - return nil - } - out := new(ServiceCatalogAPIServerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceCatalogAPIServerStatus) DeepCopyInto(out *ServiceCatalogAPIServerStatus) { - *out = *in - in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServerStatus. 
-func (in *ServiceCatalogAPIServerStatus) DeepCopy() *ServiceCatalogAPIServerStatus { - if in == nil { - return nil - } - out := new(ServiceCatalogAPIServerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceCatalogControllerManager) DeepCopyInto(out *ServiceCatalogControllerManager) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManager. -func (in *ServiceCatalogControllerManager) DeepCopy() *ServiceCatalogControllerManager { - if in == nil { - return nil - } - out := new(ServiceCatalogControllerManager) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceCatalogControllerManager) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceCatalogControllerManagerList) DeepCopyInto(out *ServiceCatalogControllerManagerList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ServiceCatalogControllerManager, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManagerList. -func (in *ServiceCatalogControllerManagerList) DeepCopy() *ServiceCatalogControllerManagerList { - if in == nil { - return nil - } - out := new(ServiceCatalogControllerManagerList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceCatalogControllerManagerList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceCatalogControllerManagerSpec) DeepCopyInto(out *ServiceCatalogControllerManagerSpec) { - *out = *in - in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManagerSpec. -func (in *ServiceCatalogControllerManagerSpec) DeepCopy() *ServiceCatalogControllerManagerSpec { - if in == nil { - return nil - } - out := new(ServiceCatalogControllerManagerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceCatalogControllerManagerStatus) DeepCopyInto(out *ServiceCatalogControllerManagerStatus) { - *out = *in - in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManagerStatus. 
-func (in *ServiceCatalogControllerManagerStatus) DeepCopy() *ServiceCatalogControllerManagerStatus { - if in == nil { - return nil - } - out := new(ServiceCatalogControllerManagerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SimpleMacvlanConfig) DeepCopyInto(out *SimpleMacvlanConfig) { - *out = *in - if in.IPAMConfig != nil { - in, out := &in.IPAMConfig, &out.IPAMConfig - *out = new(IPAMConfig) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleMacvlanConfig. -func (in *SimpleMacvlanConfig) DeepCopy() *SimpleMacvlanConfig { - if in == nil { - return nil - } - out := new(SimpleMacvlanConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StaticIPAMAddresses) DeepCopyInto(out *StaticIPAMAddresses) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMAddresses. -func (in *StaticIPAMAddresses) DeepCopy() *StaticIPAMAddresses { - if in == nil { - return nil - } - out := new(StaticIPAMAddresses) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StaticIPAMConfig) DeepCopyInto(out *StaticIPAMConfig) { - *out = *in - if in.Addresses != nil { - in, out := &in.Addresses, &out.Addresses - *out = make([]StaticIPAMAddresses, len(*in)) - copy(*out, *in) - } - if in.Routes != nil { - in, out := &in.Routes, &out.Routes - *out = make([]StaticIPAMRoutes, len(*in)) - copy(*out, *in) - } - if in.DNS != nil { - in, out := &in.DNS, &out.DNS - *out = new(StaticIPAMDNS) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMConfig. -func (in *StaticIPAMConfig) DeepCopy() *StaticIPAMConfig { - if in == nil { - return nil - } - out := new(StaticIPAMConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StaticIPAMDNS) DeepCopyInto(out *StaticIPAMDNS) { - *out = *in - if in.Nameservers != nil { - in, out := &in.Nameservers, &out.Nameservers - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Search != nil { - in, out := &in.Search, &out.Search - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMDNS. -func (in *StaticIPAMDNS) DeepCopy() *StaticIPAMDNS { - if in == nil { - return nil - } - out := new(StaticIPAMDNS) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StaticIPAMRoutes) DeepCopyInto(out *StaticIPAMRoutes) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMRoutes. -func (in *StaticIPAMRoutes) DeepCopy() *StaticIPAMRoutes { - if in == nil { - return nil - } - out := new(StaticIPAMRoutes) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *StaticPodOperatorSpec) DeepCopyInto(out *StaticPodOperatorSpec) { - *out = *in - in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticPodOperatorSpec. -func (in *StaticPodOperatorSpec) DeepCopy() *StaticPodOperatorSpec { - if in == nil { - return nil - } - out := new(StaticPodOperatorSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StaticPodOperatorStatus) DeepCopyInto(out *StaticPodOperatorStatus) { - *out = *in - in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) - if in.NodeStatuses != nil { - in, out := &in.NodeStatuses, &out.NodeStatuses - *out = make([]NodeStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticPodOperatorStatus. -func (in *StaticPodOperatorStatus) DeepCopy() *StaticPodOperatorStatus { - if in == nil { - return nil - } - out := new(StaticPodOperatorStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StatuspageProvider) DeepCopyInto(out *StatuspageProvider) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatuspageProvider. -func (in *StatuspageProvider) DeepCopy() *StatuspageProvider { - if in == nil { - return nil - } - out := new(StatuspageProvider) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Storage) DeepCopyInto(out *Storage) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage. -func (in *Storage) DeepCopy() *Storage { - if in == nil { - return nil - } - out := new(Storage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Storage) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StorageList) DeepCopyInto(out *StorageList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Storage, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageList. -func (in *StorageList) DeepCopy() *StorageList { - if in == nil { - return nil - } - out := new(StorageList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *StorageList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *StorageSpec) DeepCopyInto(out *StorageSpec) { - *out = *in - in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSpec. -func (in *StorageSpec) DeepCopy() *StorageSpec { - if in == nil { - return nil - } - out := new(StorageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StorageStatus) DeepCopyInto(out *StorageStatus) { - *out = *in - in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageStatus. -func (in *StorageStatus) DeepCopy() *StorageStatus { - if in == nil { - return nil - } - out := new(StorageStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SyslogLoggingDestinationParameters) DeepCopyInto(out *SyslogLoggingDestinationParameters) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyslogLoggingDestinationParameters. -func (in *SyslogLoggingDestinationParameters) DeepCopy() *SyslogLoggingDestinationParameters { - if in == nil { - return nil - } - out := new(SyslogLoggingDestinationParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Upstream) DeepCopyInto(out *Upstream) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Upstream. -func (in *Upstream) DeepCopy() *Upstream { - if in == nil { - return nil - } - out := new(Upstream) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UpstreamResolvers) DeepCopyInto(out *UpstreamResolvers) { - *out = *in - if in.Upstreams != nil { - in, out := &in.Upstreams, &out.Upstreams - *out = make([]Upstream, len(*in)) - copy(*out, *in) - } - in.TransportConfig.DeepCopyInto(&out.TransportConfig) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpstreamResolvers. -func (in *UpstreamResolvers) DeepCopy() *UpstreamResolvers { - if in == nil { - return nil - } - out := new(UpstreamResolvers) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go deleted file mode 100644 index 379c83a21..000000000 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go +++ /dev/null @@ -1,1363 +0,0 @@ -package v1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. 
-// -// Those methods can be generated by using hack/update-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_GenerationStatus = map[string]string{ - "": "GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made.", - "group": "group is the group of the thing you're tracking", - "resource": "resource is the resource type of the thing you're tracking", - "namespace": "namespace is where the thing you're tracking is", - "name": "name is the name of the thing you're tracking", - "lastGeneration": "lastGeneration is the last generation of the workload controller involved", - "hash": "hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps", -} - -func (GenerationStatus) SwaggerDoc() map[string]string { - return map_GenerationStatus -} - -var map_MyOperatorResource = map[string]string{ - "": "MyOperatorResource is an example operator configuration type\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", -} - -func (MyOperatorResource) SwaggerDoc() map[string]string { - return map_MyOperatorResource -} - -var map_NodeStatus = map[string]string{ - "": "NodeStatus provides information about the current state of a particular node managed by this operator.", - "nodeName": "nodeName is the name of the node", - "currentRevision": "currentRevision is the generation of the most recently successful deployment", - "targetRevision": "targetRevision is the generation of the deployment we're trying to apply", - "lastFailedRevision": "lastFailedRevision is the generation of the deployment we tried and failed to deploy.", - "lastFailedTime": "lastFailedTime is the time at which the last failed revision most recently failed.", - "lastFailedReason": "lastFailedReason is a machine readable failure reason string.", - "lastFailedCount": "lastFailedCount is how often the installer pod of the last failed revision failed.", - "lastFallbackCount": "lastFallbackCount is how often a fallback to a previous revision happened.", - "lastFailedRevisionErrors": "lastFailedRevisionErrors is a list of human readable errors during the failed deployment referenced in lastFailedRevision.", -} - -func (NodeStatus) SwaggerDoc() map[string]string { - return map_NodeStatus -} - -var map_OperatorCondition = map[string]string{ - "": "OperatorCondition is just the standard condition fields.", -} - -func (OperatorCondition) SwaggerDoc() map[string]string { - return map_OperatorCondition -} - -var map_OperatorSpec = map[string]string{ - "": "OperatorSpec contains common fields operators need. It is intended to be anonymously included inside of the Spec struct for your particular operator.", - "managementState": "managementState indicates whether and how the operator should manage the component", - "logLevel": "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands.\n\nValid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\".", - "operatorLogLevel": "operatorLogLevel is an intent based logging for the operator itself. 
It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves.\n\nValid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\".", - "unsupportedConfigOverrides": "unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override; it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides", - "observedConfig": "observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator", -} - -func (OperatorSpec) SwaggerDoc() map[string]string { - return map_OperatorSpec -} - -var map_OperatorStatus = map[string]string{ - "observedGeneration": "observedGeneration is the last generation change you've dealt with", - "conditions": "conditions is a list of conditions and their status", - "version": "version is the level this availability applies to", - "readyReplicas": "readyReplicas indicates how many replicas are ready and at the desired state", - "generations": "generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction.", -} - -func (OperatorStatus) SwaggerDoc() map[string]string { - return map_OperatorStatus -} - -var map_StaticPodOperatorSpec = map[string]string{ - "": "StaticPodOperatorSpec is spec for controllers that manage static pods.", - "forceRedeploymentReason": "forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config.", - "failedRevisionLimit": "failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api; -1 = unlimited, 0 or unset = 5 (default)", - "succeededRevisionLimit": "succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api; -1 = unlimited, 0 or unset = 5 (default)", -} - -func (StaticPodOperatorSpec) SwaggerDoc() map[string]string { - return map_StaticPodOperatorSpec -} - -var map_StaticPodOperatorStatus = map[string]string{ - "": "StaticPodOperatorStatus is status for controllers that manage static pods. 
There are different needs because individual node status must be tracked.", - "latestAvailableRevision": "latestAvailableRevision is the deploymentID of the most recent deployment", - "latestAvailableRevisionReason": "latestAvailableRevisionReason describes the detailed reason for the most recent deployment", - "nodeStatuses": "nodeStatuses track the deployment values and errors across individual nodes", -} - -func (StaticPodOperatorStatus) SwaggerDoc() map[string]string { - return map_StaticPodOperatorStatus -} - -var map_Authentication = map[string]string{ - "": "Authentication provides information to configure an operator to manage authentication.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", -} - -func (Authentication) SwaggerDoc() map[string]string { - return map_Authentication -} - -var map_AuthenticationList = map[string]string{ - "": "AuthenticationList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", -} - -func (AuthenticationList) SwaggerDoc() map[string]string { - return map_AuthenticationList -} - -var map_AuthenticationStatus = map[string]string{ - "oauthAPIServer": "OAuthAPIServer holds status specific only to oauth-apiserver", -} - -func (AuthenticationStatus) SwaggerDoc() map[string]string { - return map_AuthenticationStatus -} - -var map_OAuthAPIServerStatus = map[string]string{ - "latestAvailableRevision": "LatestAvailableRevision is the latest revision used as suffix of revisioned secrets like encryption-config. A new revision causes a new deployment of pods.", -} - -func (OAuthAPIServerStatus) SwaggerDoc() map[string]string { - return map_OAuthAPIServerStatus -} - -var map_CloudCredential = map[string]string{ - "": "CloudCredential provides a means to configure an operator to manage CredentialsRequests.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", -} - -func (CloudCredential) SwaggerDoc() map[string]string { - return map_CloudCredential -} - -var map_CloudCredentialList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", -} - -func (CloudCredentialList) SwaggerDoc() map[string]string { - return map_CloudCredentialList -} - -var map_CloudCredentialSpec = map[string]string{ - "": "CloudCredentialSpec is the specification of the desired behavior of the cloud-credential-operator.", - "credentialsMode": "CredentialsMode allows informing CCO that it should not attempt to dynamically determine the root cloud credentials capabilities, and it should just run in the specified mode. It also allows putting the operator into \"manual\" mode if desired. Leaving the field in default mode runs CCO so that the cluster's cloud credentials will be dynamically probed for capabilities (on supported clouds/platforms). 
Supported modes:\n AWS/Azure/GCP: \"\" (Default), \"Mint\", \"Passthrough\", \"Manual\"\n Others: Do not set value as other platforms only support running in \"Passthrough\"", -} - -func (CloudCredentialSpec) SwaggerDoc() map[string]string { - return map_CloudCredentialSpec -} - -var map_CloudCredentialStatus = map[string]string{ - "": "CloudCredentialStatus defines the observed status of the cloud-credential-operator.", -} - -func (CloudCredentialStatus) SwaggerDoc() map[string]string { - return map_CloudCredentialStatus -} - -var map_Config = map[string]string{ - "": "Config provides information to configure the config operator. It handles installation, migration or synchronization of cloud based cluster configurations like AWS or Azure.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec is the specification of the desired behavior of the Config Operator.", - "status": "status defines the observed status of the Config Operator.", -} - -func (Config) SwaggerDoc() map[string]string { - return map_Config -} - -var map_ConfigList = map[string]string{ - "": "ConfigList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "items": "Items contains the items", -} - -func (ConfigList) SwaggerDoc() map[string]string { - return map_ConfigList -} - -var map_AddPage = map[string]string{ - "": "AddPage allows customizing actions on the Add page in developer perspective.", - "disabledActions": "disabledActions is a list of actions that are not shown to users. Each action in the list is represented by its ID.", -} - -func (AddPage) SwaggerDoc() map[string]string { - return map_AddPage -} - -var map_Console = map[string]string{ - "": "Console provides a means to configure an operator to manage the console.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", -} - -func (Console) SwaggerDoc() map[string]string { - return map_Console -} - -var map_ConsoleConfigRoute = map[string]string{ - "": "ConsoleConfigRoute holds information on external route access to console. DEPRECATED", - "hostname": "hostname is the desired custom domain under which console will be available.", - "secret": "secret points to secret in the openshift-config namespace that contains custom certificate and key and needs to be created manually by the cluster admin. The referenced Secret is required to contain the following key value pairs: - \"tls.crt\" - to specify the custom certificate - \"tls.key\" - to specify the private key of the custom certificate If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed.", -} - -func (ConsoleConfigRoute) SwaggerDoc() map[string]string { - return map_ConsoleConfigRoute -} - -var map_ConsoleCustomization = map[string]string{ - "": "ConsoleCustomization defines optional configuration for the console UI.", - "brand": "brand is the default branding of the web console which can be overridden by providing the brand field. There is a limited set of specific brand options. This field controls elements of the console such as the logo. An invalid value will prevent a console rollout.", - "documentationBaseURL": "documentationBaseURL links to external documentation that is shown in various sections of the web console. 
Providing documentationBaseURL will override the default documentation URL. An invalid value will prevent a console rollout.", - "customProductName": "customProductName is the name that will be displayed in page titles, logo alt text, and the about dialog instead of the normal OpenShift product name.", - "customLogoFile": "customLogoFile replaces the default OpenShift logo in the masthead and about dialog. It is a reference to a ConfigMap in the openshift-config namespace. This can be created with a command like 'oc create configmap custom-logo --from-file=/path/to/file -n openshift-config'. Image size must be less than 1 MB due to constraints on the ConfigMap size. The ConfigMap key should include a file extension so that the console serves the file with the correct MIME type. Recommended logo specifications: Dimensions: Max height of 68px and max width of 200px SVG format preferred", - "developerCatalog": "developerCatalog allows configuring the developer catalog categories that are shown.", - "projectAccess": "projectAccess allows customizing the available list of ClusterRoles in the Developer perspective Project access page, which can be used by a project admin to specify roles to other users and restrict access within the project. If set, the list will replace the default ClusterRole options.", - "quickStarts": "quickStarts allows customization of available ConsoleQuickStart resources in console.", - "addPage": "addPage allows customizing actions on the Add page in developer perspective.", -} - -func (ConsoleCustomization) SwaggerDoc() map[string]string { - return map_ConsoleCustomization -} - -var map_ConsoleList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", -} - -func (ConsoleList) SwaggerDoc() map[string]string { - return map_ConsoleList -} - -var map_ConsoleProviders = map[string]string{ - "": "ConsoleProviders defines a list of optional additional providers of functionality to the console.", - "statuspage": "statuspage contains the ID for the statuspage.io page that provides status info.", -} - -func (ConsoleProviders) SwaggerDoc() map[string]string { - return map_ConsoleProviders -} - -var map_ConsoleSpec = map[string]string{ - "": "ConsoleSpec is the specification of the desired behavior of the Console.", - "customization": "customization is used to optionally provide a small set of customization options to the web console.", - "providers": "providers contains configuration for using specific service providers.", - "route": "route contains the hostname and secret reference that contains the serving certificate. If a custom route is specified, a new route will be created with the provided hostname, under which console will be available. If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed. If the custom hostname points to an arbitrary domain, manual DNS configuration steps are necessary. The default console route will be maintained to reserve the default hostname for console if the custom route is removed. If not specified, the default route will be used. 
DEPRECATED", - "plugins": "plugins defines a list of enabled console plugin names.", -} - -func (ConsoleSpec) SwaggerDoc() map[string]string { - return map_ConsoleSpec -} - -var map_ConsoleStatus = map[string]string{ - "": "ConsoleStatus defines the observed status of the Console.", -} - -func (ConsoleStatus) SwaggerDoc() map[string]string { - return map_ConsoleStatus -} - -var map_DeveloperConsoleCatalogCategory = map[string]string{ - "": "DeveloperConsoleCatalogCategory for the developer console catalog.", - "subcategories": "subcategories defines a list of child categories.", -} - -func (DeveloperConsoleCatalogCategory) SwaggerDoc() map[string]string { - return map_DeveloperConsoleCatalogCategory -} - -var map_DeveloperConsoleCatalogCategoryMeta = map[string]string{ - "": "DeveloperConsoleCatalogCategoryMeta are the key identifiers of a developer catalog category.", - "id": "ID is an identifier used in the URL to enable deep linking in console. ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters.", - "label": "label defines a category display label. It is required and must have 1-64 characters.", - "tags": "tags is a list of strings that will match the category. A selected category show all items which has at least one overlapping tag between category and item.", -} - -func (DeveloperConsoleCatalogCategoryMeta) SwaggerDoc() map[string]string { - return map_DeveloperConsoleCatalogCategoryMeta -} - -var map_DeveloperConsoleCatalogCustomization = map[string]string{ - "": "DeveloperConsoleCatalogCustomization allow cluster admin to configure developer catalog.", - "categories": "categories which are shown in the developer catalog.", -} - -func (DeveloperConsoleCatalogCustomization) SwaggerDoc() map[string]string { - return map_DeveloperConsoleCatalogCustomization -} - -var map_ProjectAccess = map[string]string{ - "": "ProjectAccess contains options for project access roles", - "availableClusterRoles": "availableClusterRoles is the list of ClusterRole names that are assignable to users through the project access tab.", -} - -func (ProjectAccess) SwaggerDoc() map[string]string { - return map_ProjectAccess -} - -var map_QuickStarts = map[string]string{ - "": "QuickStarts allow cluster admins to customize available ConsoleQuickStart resources.", - "disabled": "disabled is a list of ConsoleQuickStart resource names that are not shown to users.", -} - -func (QuickStarts) SwaggerDoc() map[string]string { - return map_QuickStarts -} - -var map_StatuspageProvider = map[string]string{ - "": "StatuspageProvider provides identity for statuspage account.", - "pageID": "pageID is the unique ID assigned by Statuspage for your page. This must be a public page.", -} - -func (StatuspageProvider) SwaggerDoc() map[string]string { - return map_StatuspageProvider -} - -var map_ClusterCSIDriver = map[string]string{ - "": "ClusterCSIDriver object allows management and configuration of a CSI driver operator installed by default in OpenShift. Name of the object must be name of the CSI driver it operates. See CSIDriverName type for list of allowed values.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. 
They may not be overridden.", -} - -func (ClusterCSIDriver) SwaggerDoc() map[string]string { - return map_ClusterCSIDriver -} - -var map_ClusterCSIDriverList = map[string]string{ - "": "ClusterCSIDriverList contains a list of ClusterCSIDriver\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", -} - -func (ClusterCSIDriverList) SwaggerDoc() map[string]string { - return map_ClusterCSIDriverList -} - -var map_ClusterCSIDriverSpec = map[string]string{ - "": "ClusterCSIDriverSpec is the desired behavior of CSI driver operator", -} - -func (ClusterCSIDriverSpec) SwaggerDoc() map[string]string { - return map_ClusterCSIDriverSpec -} - -var map_ClusterCSIDriverStatus = map[string]string{ - "": "ClusterCSIDriverStatus is the observed status of CSI driver operator", -} - -func (ClusterCSIDriverStatus) SwaggerDoc() map[string]string { - return map_ClusterCSIDriverStatus -} - -var map_CSISnapshotController = map[string]string{ - "": "CSISnapshotController provides a means to configure an operator to manage the CSI snapshots. `cluster` is the canonical name.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", -} - -func (CSISnapshotController) SwaggerDoc() map[string]string { - return map_CSISnapshotController -} - -var map_CSISnapshotControllerList = map[string]string{ - "": "CSISnapshotControllerList contains a list of CSISnapshotControllers.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", -} - -func (CSISnapshotControllerList) SwaggerDoc() map[string]string { - return map_CSISnapshotControllerList -} - -var map_CSISnapshotControllerSpec = map[string]string{ - "": "CSISnapshotControllerSpec is the specification of the desired behavior of the CSISnapshotController operator.", -} - -func (CSISnapshotControllerSpec) SwaggerDoc() map[string]string { - return map_CSISnapshotControllerSpec -} - -var map_CSISnapshotControllerStatus = map[string]string{ - "": "CSISnapshotControllerStatus defines the observed status of the CSISnapshotController operator.", -} - -func (CSISnapshotControllerStatus) SwaggerDoc() map[string]string { - return map_CSISnapshotControllerStatus -} - -var map_DNS = map[string]string{ - "": "DNS manages the CoreDNS component to provide a name resolution service for pods and services in the cluster.\n\nThis supports the DNS-based service discovery specification: https://github.com/kubernetes/dns/blob/master/docs/specification.md\n\nMore details: https://kubernetes.io/docs/tasks/administer-cluster/coredns\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec is the specification of the desired behavior of the DNS.", - "status": "status is the most recently observed status of the DNS.", -} - -func (DNS) SwaggerDoc() map[string]string { - return map_DNS -} - -var map_DNSList = map[string]string{ - "": "DNSList contains a list of DNS\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", -} - -func (DNSList) SwaggerDoc() map[string]string { - return map_DNSList -} - -var map_DNSNodePlacement = map[string]string{ - "": "DNSNodePlacement describes the node 
scheduling configuration for DNS pods.", - "nodeSelector": "nodeSelector is the node selector applied to DNS pods.\n\nIf empty, the default is used, which is currently the following:\n\n kubernetes.io/os: linux\n\nThis default is subject to change.\n\nIf set, the specified selector is used and replaces the default.", - "tolerations": "tolerations is a list of tolerations applied to DNS pods.\n\nIf empty, the DNS operator sets a toleration for the \"node-role.kubernetes.io/master\" taint. This default is subject to change. Specifying tolerations without including a toleration for the \"node-role.kubernetes.io/master\" taint may be risky as it could lead to an outage if all worker nodes become unavailable.\n\nNote that the daemon controller adds some tolerations as well. See https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/", -} - -func (DNSNodePlacement) SwaggerDoc() map[string]string { - return map_DNSNodePlacement -} - -var map_DNSOverTLSConfig = map[string]string{ - "": "DNSOverTLSConfig describes optional DNSTransportConfig fields that should be captured.", - "serverName": "serverName is the upstream server to connect to when forwarding DNS queries. This is required when Transport is set to \"TLS\". ServerName will be validated against the DNS naming conventions in RFC 1123 and should match the TLS certificate installed in the upstream resolver(s).", - "caBundle": "caBundle references a ConfigMap that must contain either a single CA Certificate or a CA Bundle. This allows cluster administrators to provide their own CA or CA bundle for validating the certificate of upstream resolvers.\n\n1. The configmap must contain a `ca-bundle.crt` key. 2. The value must be a PEM encoded CA certificate or CA bundle. 3. The administrator must create this configmap in the openshift-config namespace. 4. The upstream server certificate must contain a Subject Alternative Name (SAN) that matches ServerName.", -} - -func (DNSOverTLSConfig) SwaggerDoc() map[string]string { - return map_DNSOverTLSConfig -} - -var map_DNSSpec = map[string]string{ - "": "DNSSpec is the specification of the desired behavior of the DNS.", - "servers": "servers is a list of DNS resolvers that provide name query delegation for one or more subdomains outside the scope of the cluster domain. If servers consists of more than one Server, longest suffix match will be used to determine the Server.\n\nFor example, if there are two Servers, one for \"foo.com\" and another for \"a.foo.com\", and the name query is for \"www.a.foo.com\", it will be routed to the Server with Zone \"a.foo.com\".\n\nIf this field is nil, no servers are created.", - "upstreamResolvers": "upstreamResolvers defines a schema for configuring CoreDNS to proxy DNS messages to upstream resolvers for the case of the default (\".\") server\n\nIf this field is not specified, the upstream used will default to /etc/resolv.conf, with policy \"sequential\"", - "nodePlacement": "nodePlacement provides explicit control over the scheduling of DNS pods.\n\nGenerally, it is useful to run a DNS pod on every node so that DNS queries are always handled by a local DNS pod instead of going over the network to a DNS pod on another node. However, security policies may require restricting the placement of DNS pods to specific nodes. For example, if a security policy prohibits pods on arbitrary nodes from communicating with the API, a node selector can be specified to restrict DNS pods to nodes that are permitted to communicate with the API. 
Conversely, if running DNS pods on nodes with a particular taint is desired, a toleration can be specified for that taint.\n\nIf unset, defaults are used. See nodePlacement for more details.", - "managementState": "managementState indicates whether the DNS operator should manage cluster DNS", - "operatorLogLevel": "operatorLogLevel controls the logging level of the DNS Operator. Valid values are: \"Normal\", \"Debug\", \"Trace\". Defaults to \"Normal\". Setting operatorLogLevel: Trace will produce extremely verbose logs.", - "logLevel": "logLevel describes the desired logging verbosity for CoreDNS. Any one of the following values may be specified: * Normal logs errors from upstream resolvers. * Debug logs errors, NXDOMAIN responses, and NODATA responses. * Trace logs errors and all responses.\n Setting logLevel: Trace will produce extremely verbose logs.\nValid values are: \"Normal\", \"Debug\", \"Trace\". Defaults to \"Normal\".", -} - -func (DNSSpec) SwaggerDoc() map[string]string { - return map_DNSSpec -} - -var map_DNSStatus = map[string]string{ - "": "DNSStatus defines the observed status of the DNS.", - "clusterIP": "clusterIP is the service IP through which this DNS is made available.\n\nIn the case of the default DNS, this will be a well known IP that is used as the default nameserver for pods that are using the default ClusterFirst DNS policy.\n\nIn general, this IP can be specified in a pod's spec.dnsConfig.nameservers list or used explicitly when performing name resolution from within the cluster. Example: dig foo.com @<service IP>\n\nMore info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", - "clusterDomain": "clusterDomain is the local cluster DNS domain suffix for DNS services. This will be a subdomain as defined in RFC 1034, section 3.5: https://tools.ietf.org/html/rfc1034#section-3.5 Example: \"cluster.local\"\n\nMore info: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service", - "conditions": "conditions provide information about the state of the DNS on the cluster.\n\nThese are the supported DNS conditions:\n\n * Available\n - True if the following conditions are met:\n * DNS controller daemonset is available.\n - False if any of those conditions are unsatisfied.", -} - -func (DNSStatus) SwaggerDoc() map[string]string { - return map_DNSStatus -} - -var map_DNSTransportConfig = map[string]string{ - "": "DNSTransportConfig groups related configuration parameters used for configuring forwarding to upstream resolvers that support DNS-over-TLS.", - "transport": "transport allows cluster administrators to opt-in to using a DNS-over-TLS connection between cluster DNS and an upstream resolver(s). Configuring TLS as the transport at this level without configuring a CABundle will result in the system certificates being used to verify the serving certificate of the upstream resolver(s).\n\nPossible values: \"\" (empty) - This means no explicit choice has been made and the platform chooses the default which is subject to change over time. The current default is \"Cleartext\". \"Cleartext\" - Cluster admin specified cleartext option. This results in the same functionality as an empty value but may be useful when a cluster admin wants to be more explicit about the transport, or wants to switch from \"TLS\" to \"Cleartext\" explicitly. \"TLS\" - This indicates that DNS queries should be sent over a TLS connection. If Transport is set to TLS, you MUST also set ServerName. 
If a port is not included with the upstream IP, port 853 will be tried by default per RFC 7858 section 3.1; https://datatracker.ietf.org/doc/html/rfc7858#section-3.1.", - "tls": "tls contains the additional configuration options to use when Transport is set to \"TLS\".", -} - -func (DNSTransportConfig) SwaggerDoc() map[string]string { - return map_DNSTransportConfig -} - -var map_ForwardPlugin = map[string]string{ - "": "ForwardPlugin defines a schema for configuring the CoreDNS forward plugin.", - "upstreams": "upstreams is a list of resolvers to forward name queries for subdomains of Zones. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. The Upstreams are selected in the order specified in Policy. Each upstream is represented by an IP address or IP:port if the upstream listens on a port other than 53.\n\nA maximum of 15 upstreams is allowed per ForwardPlugin.", - "policy": "policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified:\n\n* \"Random\" picks a random upstream server for each query. * \"RoundRobin\" picks upstream servers in a round-robin order, moving to the next server for each new query. * \"Sequential\" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.\n\nThe default value is \"Random\"", - "transportConfig": "transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver.\n\nThe default value is \"\" (empty) which results in a standard cleartext connection being used when forwarding DNS requests to an upstream resolver.", -} - -func (ForwardPlugin) SwaggerDoc() map[string]string { - return map_ForwardPlugin -} - -var map_Server = map[string]string{ - "": "Server defines the schema for a server that runs per instance of CoreDNS.", - "name": "name is required and specifies a unique name for the server. Name must comply with the Service Name Syntax of rfc6335.", - "zones": "zones is required and specifies the subdomains that Server is authoritative for. Zones must conform to the rfc1123 definition of a subdomain. Specifying the cluster domain (i.e., \"cluster.local\") is invalid.", - "forwardPlugin": "forwardPlugin defines a schema for configuring CoreDNS to proxy DNS messages to upstream resolvers.", -} - -func (Server) SwaggerDoc() map[string]string { - return map_Server -} - -var map_Upstream = map[string]string{ - "": "Upstream can either be of type SystemResolvConf, or of type Network.\n\n* For an Upstream of type SystemResolvConf, no further fields are necessary:\n The upstream will be configured to use /etc/resolv.conf.\n* For an Upstream of type Network, a NetworkResolver field needs to be defined\n with an IP address or IP:port if the upstream listens on a port other than 53.", - "type": "Type defines whether this upstream contains an IP/IP:port resolver or the local /etc/resolv.conf. Type accepts 2 possible values: SystemResolvConf or Network.\n\n* When SystemResolvConf is used, the Upstream structure does not require any further fields to be defined:\n /etc/resolv.conf will be used\n* When Network is used, the Upstream structure must contain at least an Address", - "address": "Address must be defined when Type is set to Network. It will be ignored otherwise. 
It must be a valid ipv4 or ipv6 address.", - "port": "Port may be defined when Type is set to Network. It will be ignored otherwise. Port must be between 1 and 65535.", -} - -func (Upstream) SwaggerDoc() map[string]string { - return map_Upstream -} - -var map_UpstreamResolvers = map[string]string{ - "": "UpstreamResolvers defines a schema for configuring the CoreDNS forward plugin in the specific case of the default (\".\") server. It differs from ForwardPlugin in the default values it accepts: * At least one upstream should be specified. * The default policy is Sequential", - "upstreams": "Upstreams is a list of resolvers to forward name queries for the \".\" domain. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. The Upstreams are selected in the order specified in Policy.\n\nA maximum of 15 upstreams is allowed per ForwardPlugin. If no Upstreams are specified, /etc/resolv.conf is used by default", - "policy": "Policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified:\n\n* \"Random\" picks a random upstream server for each query. * \"RoundRobin\" picks upstream servers in a round-robin order, moving to the next server for each new query. * \"Sequential\" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.\n\nThe default value is \"Sequential\"", - "transportConfig": "transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver.\n\nThe default value is \"\" (empty) which results in a standard cleartext connection being used when forwarding DNS requests to an upstream resolver.", -} - -func (UpstreamResolvers) SwaggerDoc() map[string]string { - return map_UpstreamResolvers -} - -var map_Etcd = map[string]string{ - "": "Etcd provides information to configure an operator to manage etcd.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", -} - -func (Etcd) SwaggerDoc() map[string]string { - return map_Etcd -} - -var map_EtcdList = map[string]string{ - "": "EtcdList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "items": "Items contains the items", -} - -func (EtcdList) SwaggerDoc() map[string]string { - return map_EtcdList -} - -var map_AWSClassicLoadBalancerParameters = map[string]string{ - "": "AWSClassicLoadBalancerParameters holds configuration parameters for an AWS Classic load balancer.", - "connectionIdleTimeout": "connectionIdleTimeout specifies the maximum time period that a connection may be idle before the load balancer closes the connection. The value must be parseable as a time duration value; see https://pkg.go.dev/time#ParseDuration. A nil or zero value means no opinion, in which case a default value is used. The default value for this field is 60s. 
This default is subject to change.", -} - -func (AWSClassicLoadBalancerParameters) SwaggerDoc() map[string]string { - return map_AWSClassicLoadBalancerParameters -} - -var map_AWSLoadBalancerParameters = map[string]string{ - "": "AWSLoadBalancerParameters provides configuration settings that are specific to AWS load balancers.", - "type": "type is the type of AWS load balancer to instantiate for an ingresscontroller.\n\nValid values are:\n\n* \"Classic\": A Classic Load Balancer that makes routing decisions at either\n the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). See\n the following for additional details:\n\n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb\n\n* \"NLB\": A Network Load Balancer that makes routing decisions at the\n transport layer (TCP/SSL). See the following for additional details:\n\n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb", - "classicLoadBalancer": "classicLoadBalancerParameters holds configuration parameters for an AWS classic load balancer. Present only if type is Classic.", - "networkLoadBalancer": "networkLoadBalancerParameters holds configuration parameters for an AWS network load balancer. Present only if type is NLB.", -} - -func (AWSLoadBalancerParameters) SwaggerDoc() map[string]string { - return map_AWSLoadBalancerParameters -} - -var map_AWSNetworkLoadBalancerParameters = map[string]string{ - "": "AWSNetworkLoadBalancerParameters holds configuration parameters for an AWS Network load balancer.", -} - -func (AWSNetworkLoadBalancerParameters) SwaggerDoc() map[string]string { - return map_AWSNetworkLoadBalancerParameters -} - -var map_AccessLogging = map[string]string{ - "": "AccessLogging describes how client requests should be logged.", - "destination": "destination is where access logs go.", - "httpLogFormat": "httpLogFormat specifies the format of the log message for an HTTP request.\n\nIf this field is empty, log messages use the implementation's default HTTP log format. For HAProxy's default HTTP log format, see the HAProxy documentation: http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3\n\nNote that this format only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). It does not affect the log format for TLS passthrough connections.", - "httpCaptureHeaders": "httpCaptureHeaders defines HTTP headers that should be captured in access logs. If this field is empty, no headers are captured.\n\nNote that this option only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). Headers cannot be captured for TLS passthrough connections.", - "httpCaptureCookies": "httpCaptureCookies specifies HTTP cookies that should be captured in access logs. If this field is empty, no cookies are captured.", - "logEmptyRequests": "logEmptyRequests specifies how connections on which no request is received should be logged. Typically, these empty requests come from load balancers' health probes or Web browsers' speculative connections (\"preconnect\"), in which case logging these requests may be undesirable. However, these requests may also be caused by network errors, in which case logging empty requests may be useful for diagnosing the errors. 
In addition, these requests may be caused by port scans, in which case logging empty requests may aid in detecting intrusion attempts. Allowed values for this field are \"Log\" and \"Ignore\". The default value is \"Log\".", -} - -func (AccessLogging) SwaggerDoc() map[string]string { - return map_AccessLogging -} - -var map_ClientTLS = map[string]string{ - "": "ClientTLS specifies TLS configuration to enable client-to-server authentication, which can be used for mutual TLS.", - "clientCertificatePolicy": "clientCertificatePolicy specifies whether the ingress controller requires clients to provide certificates. This field accepts the values \"Required\" or \"Optional\".\n\nNote that the ingress controller only checks client certificates for edge-terminated and reencrypt TLS routes; it cannot check certificates for cleartext HTTP or passthrough TLS routes.", - "clientCA": "clientCA specifies a configmap containing the PEM-encoded CA certificate bundle that should be used to verify a client's certificate. The administrator must create this configmap in the openshift-config namespace.", - "allowedSubjectPatterns": "allowedSubjectPatterns specifies a list of regular expressions that should be matched against the distinguished name on a valid client certificate to filter requests. The regular expressions must use PCRE syntax. If this list is empty, no filtering is performed. If the list is nonempty, then at least one pattern must match a client certificate's distinguished name or else the ingress controller rejects the certificate and denies the connection.", -} - -func (ClientTLS) SwaggerDoc() map[string]string { - return map_ClientTLS -} - -var map_ContainerLoggingDestinationParameters = map[string]string{ - "": "ContainerLoggingDestinationParameters describes parameters for the Container logging destination type.", -} - -func (ContainerLoggingDestinationParameters) SwaggerDoc() map[string]string { - return map_ContainerLoggingDestinationParameters -} - -var map_EndpointPublishingStrategy = map[string]string{ - "": "EndpointPublishingStrategy is a way to publish the endpoints of an IngressController, and represents the type and any additional configuration for a specific type.", - "type": "type is the publishing strategy to use. Valid values are:\n\n* LoadBalancerService\n\nPublishes the ingress controller using a Kubernetes LoadBalancer Service.\n\nIn this configuration, the ingress controller deployment uses container networking. A LoadBalancer Service is created to publish the deployment.\n\nSee: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer\n\nIf domain is set, a wildcard DNS record will be managed to point at the LoadBalancer Service's external name. DNS records are managed only in DNS zones defined by dns.config.openshift.io/cluster .spec.publicZone and .spec.privateZone.\n\nWildcard DNS management is currently supported only on the AWS, Azure, and GCP platforms.\n\n* HostNetwork\n\nPublishes the ingress controller on node ports where the ingress controller is deployed.\n\nIn this configuration, the ingress controller deployment uses host networking, bound to node ports 80 and 443. The user is responsible for configuring an external load balancer to publish the ingress controller via the node ports.\n\n* Private\n\nDoes not publish the ingress controller.\n\nIn this configuration, the ingress controller deployment uses container networking, and is not explicitly published. 
The user must manually publish the ingress controller.\n\n* NodePortService\n\nPublishes the ingress controller using a Kubernetes NodePort Service.\n\nIn this configuration, the ingress controller deployment uses container networking. A NodePort Service is created to publish the deployment. The specific node ports are dynamically allocated by OpenShift; however, to support static port allocations, user changes to the node port field of the managed NodePort Service will be preserved.", - "loadBalancer": "loadBalancer holds parameters for the load balancer. Present only if type is LoadBalancerService.", - "hostNetwork": "hostNetwork holds parameters for the HostNetwork endpoint publishing strategy. Present only if type is HostNetwork.", - "private": "private holds parameters for the Private endpoint publishing strategy. Present only if type is Private.", - "nodePort": "nodePort holds parameters for the NodePortService endpoint publishing strategy. Present only if type is NodePortService.", -} - -func (EndpointPublishingStrategy) SwaggerDoc() map[string]string { - return map_EndpointPublishingStrategy -} - -var map_GCPLoadBalancerParameters = map[string]string{ - "": "GCPLoadBalancerParameters provides configuration settings that are specific to GCP load balancers.", - "clientAccess": "clientAccess describes how client access is restricted for internal load balancers.\n\nValid values are: * \"Global\": Specifying an internal load balancer with Global client access\n allows clients from any region within the VPC to communicate with the load\n balancer.\n\n https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#global_access\n\n* \"Local\": Specifying an internal load balancer with Local client access\n means only clients within the same region (and VPC) as the GCP load balancer\n can communicate with the load balancer. Note that this is the default behavior.\n\n https://cloud.google.com/load-balancing/docs/internal#client_access", -} - -func (GCPLoadBalancerParameters) SwaggerDoc() map[string]string { - return map_GCPLoadBalancerParameters -} - -var map_HTTPCompressionPolicy = map[string]string{ - "": "httpCompressionPolicy turns on compression for the specified MIME types.\n\nThis field is optional, and its absence implies that compression should not be enabled globally in HAProxy.\n\nIf httpCompressionPolicy exists, compression should be enabled only for the specified MIME types.", - "mimeTypes": "mimeTypes is a list of MIME types that should have compression applied. This list can be empty, in which case the ingress controller does not apply compression.\n\nNote: Not all MIME types benefit from compression, but HAProxy will still use resources to try to compress if instructed to. Generally speaking, text (html, css, js, etc.) formats benefit from compression, but formats that are already compressed (image, audio, video, etc.) benefit little in exchange for the time and cpu spent on compressing again. 
See https://joehonton.medium.com/the-gzip-penalty-d31bd697f1a2", -} - -func (HTTPCompressionPolicy) SwaggerDoc() map[string]string { - return map_HTTPCompressionPolicy -} - -var map_HostNetworkStrategy = map[string]string{ - "": "HostNetworkStrategy holds parameters for the HostNetwork endpoint publishing strategy.", - "protocol": "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol.\n\nPROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol.\n\nThe following values are valid for this field:\n\n* The empty string. * \"TCP\". * \"PROXY\".\n\nThe empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change.", - "httpPort": "httpPort is the port on the host which should be used to listen for HTTP requests. This field should be set when port 80 is already in use. The value should not coincide with the NodePort range of the cluster. When the value is 0 or is not specified it defaults to 80.", - "httpsPort": "httpsPort is the port on the host which should be used to listen for HTTPS requests. This field should be set when port 443 is already in use. The value should not coincide with the NodePort range of the cluster. When the value is 0 or is not specified it defaults to 443.", - "statsPort": "statsPort is the port on the host where the stats from the router are published. The value should not coincide with the NodePort range of the cluster. If an external load balancer is configured to forward connections to this IngressController, the load balancer should use this port for health checks. The load balancer can send HTTP probes on this port on a given node, with the path /healthz/ready to determine if the ingress controller is ready to receive traffic on the node. For proper operation the load balancer must not forward traffic to a node until the health check reports ready. The load balancer should also stop forwarding requests within a maximum of 45 seconds after /healthz/ready starts reporting not-ready. Probing every 5 to 10 seconds, with a 5-second timeout and with a threshold of two successful or failed requests to become healthy or unhealthy respectively, are well-tested values. When the value is 0 or is not specified it defaults to 1936.", -} - -func (HostNetworkStrategy) SwaggerDoc() map[string]string { - return map_HostNetworkStrategy -} - -var map_IngressController = map[string]string{ - "": "IngressController describes a managed ingress controller for the cluster. The controller can service OpenShift Route and Kubernetes Ingress resources.\n\nWhen an IngressController is created, a new ingress controller deployment is created to allow external traffic to reach the services that expose Ingress or Route resources. 
Updating this resource may lead to disruption for public facing network connections as a new ingress controller revision may be rolled out.\n\nhttps://kubernetes.io/docs/concepts/services-networking/ingress-controllers\n\nWhenever possible, sensible defaults for the platform are used. See each field for more details.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec is the specification of the desired behavior of the IngressController.", - "status": "status is the most recently observed status of the IngressController.", -} - -func (IngressController) SwaggerDoc() map[string]string { - return map_IngressController -} - -var map_IngressControllerCaptureHTTPCookie = map[string]string{ - "": "IngressControllerCaptureHTTPCookie describes an HTTP cookie that should be captured.", - "maxLength": "maxLength specifies a maximum length of the string that will be logged, which includes the cookie name, cookie value, and one-character delimiter. If the log entry exceeds this length, the value will be truncated in the log message. Note that the ingress controller may impose a separate bound on the total length of HTTP headers in a request.", -} - -func (IngressControllerCaptureHTTPCookie) SwaggerDoc() map[string]string { - return map_IngressControllerCaptureHTTPCookie -} - -var map_IngressControllerCaptureHTTPCookieUnion = map[string]string{ - "": "IngressControllerCaptureHTTPCookieUnion describes optional fields of an HTTP cookie that should be captured.", - "matchType": "matchType specifies the type of match to be performed on the cookie name. Allowed values are \"Exact\" for an exact string match and \"Prefix\" for a string prefix match. If \"Exact\" is specified, a name must be specified in the name field. If \"Prefix\" is provided, a prefix must be specified in the namePrefix field. For example, specifying matchType \"Prefix\" and namePrefix \"foo\" will capture a cookie named \"foo\" or \"foobar\" but not one named \"bar\". The first matching cookie is captured.", - "name": "name specifies a cookie name. Its value must be a valid HTTP cookie name as defined in RFC 6265 section 4.1.", - "namePrefix": "namePrefix specifies a cookie name prefix. Its value must be a valid HTTP cookie name as defined in RFC 6265 section 4.1.", -} - -func (IngressControllerCaptureHTTPCookieUnion) SwaggerDoc() map[string]string { - return map_IngressControllerCaptureHTTPCookieUnion -} - -var map_IngressControllerCaptureHTTPHeader = map[string]string{ - "": "IngressControllerCaptureHTTPHeader describes an HTTP header that should be captured.", - "name": "name specifies a header name. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2.", - "maxLength": "maxLength specifies a maximum length for the header value. If a header value exceeds this length, the value will be truncated in the log message. 
Note that the ingress controller may impose a separate bound on the total length of HTTP headers in a request.", -} - -func (IngressControllerCaptureHTTPHeader) SwaggerDoc() map[string]string { - return map_IngressControllerCaptureHTTPHeader -} - -var map_IngressControllerCaptureHTTPHeaders = map[string]string{ - "": "IngressControllerCaptureHTTPHeaders specifies which HTTP headers the IngressController captures.", - "request": "request specifies which HTTP request headers to capture.\n\nIf this field is empty, no request headers are captured.", - "response": "response specifies which HTTP response headers to capture.\n\nIf this field is empty, no response headers are captured.", -} - -func (IngressControllerCaptureHTTPHeaders) SwaggerDoc() map[string]string { - return map_IngressControllerCaptureHTTPHeaders -} - -var map_IngressControllerHTTPHeaders = map[string]string{ - "": "IngressControllerHTTPHeaders specifies how the IngressController handles certain HTTP headers.", - "forwardedHeaderPolicy": "forwardedHeaderPolicy specifies when and how the IngressController sets the Forwarded, X-Forwarded-For, X-Forwarded-Host, X-Forwarded-Port, X-Forwarded-Proto, and X-Forwarded-Proto-Version HTTP headers. The value may be one of the following:\n\n* \"Append\", which specifies that the IngressController appends the\n headers, preserving existing headers.\n\n* \"Replace\", which specifies that the IngressController sets the\n headers, replacing any existing Forwarded or X-Forwarded-* headers.\n\n* \"IfNone\", which specifies that the IngressController sets the\n headers if they are not already set.\n\n* \"Never\", which specifies that the IngressController never sets the\n headers, preserving any existing headers.\n\nBy default, the policy is \"Append\".", - "uniqueId": "uniqueId describes configuration for a custom HTTP header that the ingress controller should inject into incoming HTTP requests. Typically, this header is configured to have a value that is unique to the HTTP request. The header can be used by applications or included in access logs to facilitate tracing individual HTTP requests.\n\nIf this field is empty, no such header is injected into requests.", - "headerNameCaseAdjustments": "headerNameCaseAdjustments specifies case adjustments that can be applied to HTTP header names. Each adjustment is specified as an HTTP header name with the desired capitalization. For example, specifying \"X-Forwarded-For\" indicates that the \"x-forwarded-for\" HTTP header should be adjusted to have the specified capitalization.\n\nThese adjustments are only applied to cleartext, edge-terminated, and re-encrypt routes, and only when using HTTP/1.\n\nFor request headers, these adjustments are applied only for routes that have the haproxy.router.openshift.io/h1-adjust-case=true annotation. For response headers, these adjustments are applied to all HTTP responses.\n\nIf this field is empty, no request headers are adjusted.", -} - -func (IngressControllerHTTPHeaders) SwaggerDoc() map[string]string { - return map_IngressControllerHTTPHeaders -} - -var map_IngressControllerHTTPUniqueIdHeaderPolicy = map[string]string{ - "": "IngressControllerHTTPUniqueIdHeaderPolicy describes configuration for a unique id header.", - "name": "name specifies the name of the HTTP header (for example, \"unique-id\") that the ingress controller should inject into HTTP requests. The field's value must be a valid HTTP header name as defined in RFC 2616 section 4.2. 
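(For illustration: combining the httpHeaders fields documented here, forwardedHeaderPolicy and uniqueId, into one sketch; the header name is hypothetical and must be a valid RFC 2616 header name.)

    spec:
      httpHeaders:
        forwardedHeaderPolicy: Append   # the documented default, made explicit
        uniqueId:
          name: x-request-id            # hypothetical header name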
If the field is empty, no header is injected.",
- "format": "format specifies the format for the injected HTTP header's value. This field has no effect unless name is specified. For the HAProxy-based ingress controller implementation, this format uses the same syntax as the HTTP log format. If the field is empty, the default value is \"%{+X}o\\ %ci:%cp_%fi:%fp_%Ts_%rt:%pid\"; see the corresponding HAProxy documentation: http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3",
-}
-
-func (IngressControllerHTTPUniqueIdHeaderPolicy) SwaggerDoc() map[string]string {
- return map_IngressControllerHTTPUniqueIdHeaderPolicy
-}
-
-var map_IngressControllerList = map[string]string{
- "": "IngressControllerList contains a list of IngressControllers.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
-}
-
-func (IngressControllerList) SwaggerDoc() map[string]string {
- return map_IngressControllerList
-}
-
-var map_IngressControllerLogging = map[string]string{
- "": "IngressControllerLogging describes what should be logged where.",
- "access": "access describes how the client requests should be logged.\n\nIf this field is empty, access logging is disabled.",
-}
-
-func (IngressControllerLogging) SwaggerDoc() map[string]string {
- return map_IngressControllerLogging
-}
-
-var map_IngressControllerSpec = map[string]string{
- "": "IngressControllerSpec is the specification of the desired behavior of the IngressController.",
- "domain": "domain is a DNS name serviced by the ingress controller and is used to configure multiple features:\n\n* For the LoadBalancerService endpoint publishing strategy, domain is\n used to configure DNS records. See endpointPublishingStrategy.\n\n* When using a generated default certificate, the certificate will be valid\n for domain and its subdomains. See defaultCertificate.\n\n* The value is published to individual Route statuses so that end-users\n know where to target external DNS records.\n\ndomain must be unique among all IngressControllers, and cannot be updated.\n\nIf empty, defaults to ingress.config.openshift.io/cluster .spec.domain.",
- "httpErrorCodePages": "httpErrorCodePages specifies a configmap with custom error pages. The administrator must create this configmap in the openshift-config namespace. This configmap should have keys in the format \"error-page-<error code>.http\", where <error code> is an HTTP error code. For example, \"error-page-503.http\" defines an error page for HTTP 503 responses. Currently only error pages for 503 and 404 responses can be customized. Each value in the configmap should be the full response, including HTTP headers. E.g. https://raw.githubusercontent.com/openshift/router/fadab45747a9b30cc3f0a4b41ad2871f95827a93/images/router/haproxy/conf/error-page-503.http If this field is empty, the ingress controller uses the default error pages.",
- "replicas": "replicas is the desired number of ingress controller replicas. If unset, the default depends on the value of the defaultPlacement field in the cluster config.openshift.io/v1/ingresses status.\n\nThe value of replicas is set based on the value of a chosen field in the Infrastructure CR. If defaultPlacement is set to ControlPlane, the chosen field will be controlPlaneTopology. If it is set to Workers, the chosen field will be infrastructureTopology.
Replicas will then be set to 1 or 2 based on whether the chosen field's value is SingleReplica or HighlyAvailable, respectively.\n\nThese defaults are subject to change.",
- "endpointPublishingStrategy": "endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc.\n\nIf unset, the default is based on infrastructure.config.openshift.io/cluster .status.platform:\n\n AWS: LoadBalancerService (with External scope)\n Azure: LoadBalancerService (with External scope)\n GCP: LoadBalancerService (with External scope)\n IBMCloud: LoadBalancerService (with External scope)\n AlibabaCloud: LoadBalancerService (with External scope)\n Libvirt: HostNetwork\n\nAny other platform types (including None) default to HostNetwork.\n\nendpointPublishingStrategy cannot be updated.",
- "defaultCertificate": "defaultCertificate is a reference to a secret containing the default certificate served by the ingress controller. When Routes don't specify their own certificate, defaultCertificate is used.\n\nThe secret must contain the following keys and data:\n\n tls.crt: certificate file contents\n tls.key: key file contents\n\nIf unset, a wildcard certificate is automatically generated and used. The certificate is valid for the ingress controller domain (and subdomains) and the generated certificate's CA will be automatically integrated with the cluster's trust store.\n\nIf a wildcard certificate is used and shared by multiple HTTP/2 enabled routes (which implies ALPN) then clients (i.e., notably browsers) are at liberty to reuse open connections. This means a client can reuse a connection to another route and that is likely to fail. This behaviour is generally known as connection coalescing.\n\nThe in-use certificate (whether generated or user-specified) will be automatically integrated with OpenShift's built-in OAuth server.",
- "namespaceSelector": "namespaceSelector is used to filter the set of namespaces serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.",
- "routeSelector": "routeSelector is used to filter the set of Routes serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.",
- "nodePlacement": "nodePlacement enables explicit control over the scheduling of the ingress controller.\n\nIf unset, defaults are used. See NodePlacement for more details.",
- "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for ingresscontrollers.\n\nIf unset, the default is based on the apiservers.config.openshift.io/cluster resource.\n\nNote that when using the Old, Intermediate, and Modern profile types, the effective profile configuration is subject to change between releases. For example, given a specification to use the Intermediate profile deployed on release X.Y.Z, an upgrade to release X.Y.Z+1 may cause a new profile configuration to be applied to the ingress controller, resulting in a rollout.",
- "clientTLS": "clientTLS specifies settings for requesting and verifying client certificates, which can be used to enable mutual TLS for edge-terminated and reencrypt routes.",
- "routeAdmission": "routeAdmission defines a policy for handling new route claims (for example, to allow or deny claims across namespaces).\n\nIf empty, defaults will be applied. See specific routeAdmission fields for details about their defaults.",
- "logging": "logging defines parameters for what should be logged where.
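(For illustration, a sketch that enables access logs to the Container destination; this assumes the access log destination field is named destination, consistent with the LoggingDestination docs later in this file.)

    spec:
      logging:
        access:
          destination:
            type: Container   # logs are written to the "logs" sidecar container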
If this field is empty, operational logs are enabled but access logs are disabled.", - "httpHeaders": "httpHeaders defines policy for HTTP headers.\n\nIf this field is empty, the default values are used.", - "httpEmptyRequestsPolicy": "httpEmptyRequestsPolicy describes how HTTP connections should be handled if the connection times out before a request is received. Allowed values for this field are \"Respond\" and \"Ignore\". If the field is set to \"Respond\", the ingress controller sends an HTTP 400 or 408 response, logs the connection (if access logging is enabled), and counts the connection in the appropriate metrics. If the field is set to \"Ignore\", the ingress controller closes the connection without sending a response, logging the connection, or incrementing metrics. The default value is \"Respond\".\n\nTypically, these connections come from load balancers' health probes or Web browsers' speculative connections (\"preconnect\") and can be safely ignored. However, these requests may also be caused by network errors, and so setting this field to \"Ignore\" may impede detection and diagnosis of problems. In addition, these requests may be caused by port scans, in which case logging empty requests may aid in detecting intrusion attempts.", - "tuningOptions": "tuningOptions defines parameters for adjusting the performance of ingress controller pods. All fields are optional and will use their respective defaults if not set. See specific tuningOptions fields for more details.\n\nSetting fields within tuningOptions is generally not recommended. The default values are suitable for most configurations.", - "unsupportedConfigOverrides": "unsupportedConfigOverrides allows specifying unsupported configuration options. Its use is unsupported.", - "httpCompression": "httpCompression defines a policy for HTTP traffic compression. By default, there is no HTTP compression.", -} - -func (IngressControllerSpec) SwaggerDoc() map[string]string { - return map_IngressControllerSpec -} - -var map_IngressControllerStatus = map[string]string{ - "": "IngressControllerStatus defines the observed status of the IngressController.", - "availableReplicas": "availableReplicas is number of observed available replicas according to the ingress controller deployment.", - "selector": "selector is a label selector, in string format, for ingress controller pods corresponding to the IngressController. 
The number of matching pods should equal the value of availableReplicas.",
- "domain": "domain is the actual domain in use.",
- "endpointPublishingStrategy": "endpointPublishingStrategy is the actual strategy in use.",
- "conditions": "conditions is a list of conditions and their status.\n\nAvailable means the ingress controller deployment is available and servicing route and ingress resources (i.e., .status.availableReplicas equals .spec.replicas)\n\nThere are additional conditions which indicate the status of other ingress controller features and capabilities.\n\n * LoadBalancerManaged\n - True if the following conditions are met:\n * The endpoint publishing strategy requires a service load balancer.\n - False if any of those conditions are unsatisfied.\n\n * LoadBalancerReady\n - True if the following conditions are met:\n * A load balancer is managed.\n * The load balancer is ready.\n - False if any of those conditions are unsatisfied.\n\n * DNSManaged\n - True if the following conditions are met:\n * The endpoint publishing strategy and platform support DNS.\n * The ingress controller domain is set.\n * dns.config.openshift.io/cluster configures DNS zones.\n - False if any of those conditions are unsatisfied.\n\n * DNSReady\n - True if the following conditions are met:\n * DNS is managed.\n * DNS records have been successfully created.\n - False if any of those conditions are unsatisfied.",
- "tlsProfile": "tlsProfile is the TLS connection configuration that is in effect.",
- "observedGeneration": "observedGeneration is the most recent generation observed.",
- "namespaceSelector": "namespaceSelector is the actual namespaceSelector in use.",
- "routeSelector": "routeSelector is the actual routeSelector in use.",
-}
-
-func (IngressControllerStatus) SwaggerDoc() map[string]string {
- return map_IngressControllerStatus
-}
-
-var map_IngressControllerTuningOptions = map[string]string{
- "": "IngressControllerTuningOptions specifies options for tuning the performance of ingress controller pods",
- "headerBufferBytes": "headerBufferBytes describes how much memory should be reserved (in bytes) for IngressController connection sessions. Note that this value must be at least 16384 if HTTP/2 is enabled for the IngressController (https://tools.ietf.org/html/rfc7540). If this field is empty, the IngressController will use a default value of 32768 bytes.\n\nSetting this field is generally not recommended as headerBufferBytes values that are too small may break the IngressController and headerBufferBytes values that are too large could cause the IngressController to use significantly more memory than necessary.",
- "headerBufferMaxRewriteBytes": "headerBufferMaxRewriteBytes describes how much memory should be reserved (in bytes) from headerBufferBytes for HTTP header rewriting and appending for IngressController connection sessions. Note that incoming HTTP requests will be limited to (headerBufferBytes - headerBufferMaxRewriteBytes) bytes, meaning headerBufferBytes must be greater than headerBufferMaxRewriteBytes. If this field is empty, the IngressController will use a default value of 8192 bytes.\n\nSetting this field is generally not recommended as headerBufferMaxRewriteBytes values that are too small may break the IngressController and headerBufferMaxRewriteBytes values that are too large could cause the IngressController to use significantly more memory than necessary.",
- "threadCount": "threadCount defines the number of threads created per HAProxy process.
Creating more threads allows each ingress controller pod to handle more connections, at the cost of more system resources being used. HAProxy currently supports up to 64 threads. If this field is empty, the IngressController will use the default value. The current default is 4 threads, but this may change in future releases.\n\nSetting this field is generally not recommended. Increasing the number of HAProxy threads allows ingress controller pods to utilize more CPU time under load, potentially starving other pods if set too high. Reducing the number of threads may cause the ingress controller to perform poorly.", - "clientTimeout": "clientTimeout defines how long a connection will be held open while waiting for a client response.\n\nIf unset, the default timeout is 30s", - "clientFinTimeout": "clientFinTimeout defines how long a connection will be held open while waiting for the client response to the server/backend closing the connection.\n\nIf unset, the default timeout is 1s", - "serverTimeout": "serverTimeout defines how long a connection will be held open while waiting for a server/backend response.\n\nIf unset, the default timeout is 30s", - "serverFinTimeout": "serverFinTimeout defines how long a connection will be held open while waiting for the server/backend response to the client closing the connection.\n\nIf unset, the default timeout is 1s", - "tunnelTimeout": "tunnelTimeout defines how long a tunnel connection (including websockets) will be held open while the tunnel is idle.\n\nIf unset, the default timeout is 1h", - "tlsInspectDelay": "tlsInspectDelay defines how long the router can hold data to find a matching route.\n\nSetting this too short can cause the router to fall back to the default certificate for edge-terminated or reencrypt routes even when a better matching certificate could be used.\n\nIf unset, the default inspect delay is 5s", - "healthCheckInterval": "healthCheckInterval defines how long the router waits between two consecutive health checks on its configured backends. This value is applied globally as a default for all routes, but may be overridden per-route by the route annotation \"router.openshift.io/haproxy.health.check.interval\".\n\nExpects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, eg \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\" U+00B5 or \"μs\" U+03BC), \"ms\", \"s\", \"m\", \"h\".\n\nSetting this to less than 5s can cause excess traffic due to too frequent TCP health checks and accompanying SYN packet storms. Alternatively, setting this too high can result in increased latency, due to backend servers that are no longer available, but haven't yet been detected as such.\n\nAn empty or zero healthCheckInterval means no opinion and IngressController chooses a default, which is subject to change over time. Currently the default healthCheckInterval value is 5s.\n\nCurrently the minimum allowed value is 1s and the maximum allowed value is 2147483647ms (24.85 days). Both are subject to change over time.", - "maxConnections": "maxConnections defines the maximum number of simultaneous connections that can be established per HAProxy process. 
Increasing this value allows each ingress controller pod to handle more connections but at the cost of additional system resources being consumed.\n\nPermitted values are: empty, 0, -1, and the range 2000-2000000.\n\nIf this field is empty or 0, the IngressController will use the default value of 20000, but the default is subject to change in future releases.\n\nIf the value is -1 then HAProxy will dynamically compute a maximum value based on the available ulimits in the running container. Selecting -1 (i.e., auto) will result in a large value being computed (~520000 on OpenShift >=4.10 clusters) and therefore each HAProxy process will incur significant memory usage compared to the current default of 20000.\n\nSetting a value that is greater than the current operating system limit will prevent the HAProxy process from starting.\n\nIf you choose a discrete value (e.g., 750000) and the router pod is migrated to a new node, there's no guarantee that the new node has identical ulimits configured. In such a scenario the pod would fail to start. If you have nodes with different ulimits configured (e.g., different tuned profiles) and you choose a discrete value then the guidance is to use -1 and let the value be computed dynamically at runtime.\n\nYou can monitor memory usage for router containers with the following metric: 'container_memory_working_set_bytes{container=\"router\",namespace=\"openshift-ingress\"}'.\n\nYou can monitor memory usage of individual HAProxy processes in router containers with the following metric: 'container_memory_working_set_bytes{container=\"router\",namespace=\"openshift-ingress\"}/container_processes{container=\"router\",namespace=\"openshift-ingress\"}'.",
-}
-
-func (IngressControllerTuningOptions) SwaggerDoc() map[string]string {
- return map_IngressControllerTuningOptions
-}
-
-var map_LoadBalancerStrategy = map[string]string{
- "": "LoadBalancerStrategy holds parameters for a load balancer.",
- "scope": "scope indicates the scope at which the load balancer is exposed. Possible values are \"External\" and \"Internal\".",
- "providerParameters": "providerParameters holds desired load balancer information specific to the underlying infrastructure provider.\n\nIf empty, defaults will be applied. See specific providerParameters fields for details about their defaults.",
-}
-
-func (LoadBalancerStrategy) SwaggerDoc() map[string]string {
- return map_LoadBalancerStrategy
-}
-
-var map_LoggingDestination = map[string]string{
- "": "LoggingDestination describes a destination for log messages.",
- "type": "type is the type of destination for logs. It must be one of the following:\n\n* Container\n\nThe ingress operator configures the sidecar container named \"logs\" on the ingress controller pod and configures the ingress controller to write logs to the sidecar. The logs are then available as container logs. The expectation is that the administrator configures a custom logging solution that reads logs from this sidecar. Note that using container logs means that logs may be dropped if the rate of logs exceeds the container runtime's or the custom logging solution's capacity.\n\n* Syslog\n\nLogs are sent to a syslog endpoint. The administrator must specify an endpoint that can receive syslog messages. The expectation is that the administrator has configured a custom syslog instance.",
- "syslog": "syslog holds parameters for a syslog endpoint. Present only if type is Syslog.",
- "container": "container holds parameters for the Container logging destination.
Present only if type is Container.", -} - -func (LoggingDestination) SwaggerDoc() map[string]string { - return map_LoggingDestination -} - -var map_NodePlacement = map[string]string{ - "": "NodePlacement describes node scheduling configuration for an ingress controller.", - "nodeSelector": "nodeSelector is the node selector applied to ingress controller deployments.\n\nIf set, the specified selector is used and replaces the default.\n\nIf unset, the default depends on the value of the defaultPlacement field in the cluster config.openshift.io/v1/ingresses status.\n\nWhen defaultPlacement is Workers, the default is:\n\n kubernetes.io/os: linux\n node-role.kubernetes.io/worker: ''\n\nWhen defaultPlacement is ControlPlane, the default is:\n\n kubernetes.io/os: linux\n node-role.kubernetes.io/master: ''\n\nThese defaults are subject to change.", - "tolerations": "tolerations is a list of tolerations applied to ingress controller deployments.\n\nThe default is an empty list.\n\nSee https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/", -} - -func (NodePlacement) SwaggerDoc() map[string]string { - return map_NodePlacement -} - -var map_NodePortStrategy = map[string]string{ - "": "NodePortStrategy holds parameters for the NodePortService endpoint publishing strategy.", - "protocol": "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol.\n\nPROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol.\n\nThe following values are valid for this field:\n\n* The empty string. * \"TCP\". * \"PROXY\".\n\nThe empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change.", -} - -func (NodePortStrategy) SwaggerDoc() map[string]string { - return map_NodePortStrategy -} - -var map_PrivateStrategy = map[string]string{ - "": "PrivateStrategy holds parameters for the Private endpoint publishing strategy.", -} - -func (PrivateStrategy) SwaggerDoc() map[string]string { - return map_PrivateStrategy -} - -var map_ProviderLoadBalancerParameters = map[string]string{ - "": "ProviderLoadBalancerParameters holds desired load balancer information specific to the underlying infrastructure provider.", - "type": "type is the underlying infrastructure provider for the load balancer. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Nutanix\", \"OpenStack\", and \"VSphere\".", - "aws": "aws provides configuration settings that are specific to AWS load balancers.\n\nIf empty, defaults will be applied. See specific aws fields for details about their defaults.", - "gcp": "gcp provides configuration settings that are specific to GCP load balancers.\n\nIf empty, defaults will be applied. 
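(For illustration: composing the fields documented here, endpointPublishingStrategy type, loadBalancer scope, providerParameters, and gcp clientAccess, into one sketch; the chosen values are examples only.)

    spec:
      endpointPublishingStrategy:
        type: LoadBalancerService
        loadBalancer:
          scope: Internal
          providerParameters:
            type: GCP
            gcp:
              clientAccess: Global   # clients from any region in the VPC may reach the LB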
See specific gcp fields for details about their defaults.", -} - -func (ProviderLoadBalancerParameters) SwaggerDoc() map[string]string { - return map_ProviderLoadBalancerParameters -} - -var map_RouteAdmissionPolicy = map[string]string{ - "": "RouteAdmissionPolicy is an admission policy for allowing new route claims.", - "namespaceOwnership": "namespaceOwnership describes how host name claims across namespaces should be handled.\n\nValue must be one of:\n\n- Strict: Do not allow routes in different namespaces to claim the same host.\n\n- InterNamespaceAllowed: Allow routes to claim different paths of the same\n host name across namespaces.\n\nIf empty, the default is Strict.", - "wildcardPolicy": "wildcardPolicy describes how routes with wildcard policies should be handled for the ingress controller. WildcardPolicy controls use of routes [1] exposed by the ingress controller based on the route's wildcard policy.\n\n[1] https://github.com/openshift/api/blob/master/route/v1/types.go\n\nNote: Updating WildcardPolicy from WildcardsAllowed to WildcardsDisallowed will cause admitted routes with a wildcard policy of Subdomain to stop working. These routes must be updated to a wildcard policy of None to be readmitted by the ingress controller.\n\nWildcardPolicy supports WildcardsAllowed and WildcardsDisallowed values.\n\nIf empty, defaults to \"WildcardsDisallowed\".", -} - -func (RouteAdmissionPolicy) SwaggerDoc() map[string]string { - return map_RouteAdmissionPolicy -} - -var map_SyslogLoggingDestinationParameters = map[string]string{ - "": "SyslogLoggingDestinationParameters describes parameters for the Syslog logging destination type.", - "address": "address is the IP address of the syslog endpoint that receives log messages.", - "port": "port is the UDP port number of the syslog endpoint that receives log messages.", - "facility": "facility specifies the syslog facility of log messages.\n\nIf this field is empty, the facility is \"local1\".", - "maxLength": "maxLength is the maximum length of the syslog message\n\nIf this field is empty, the maxLength is set to \"1024\".", -} - -func (SyslogLoggingDestinationParameters) SwaggerDoc() map[string]string { - return map_SyslogLoggingDestinationParameters -} - -var map_KubeAPIServer = map[string]string{ - "": "KubeAPIServer provides information to configure an operator to manage kube-apiserver.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec is the specification of the desired behavior of the Kubernetes API Server", - "status": "status is the most recently observed status of the Kubernetes API Server", -} - -func (KubeAPIServer) SwaggerDoc() map[string]string { - return map_KubeAPIServer -} - -var map_KubeAPIServerList = map[string]string{ - "": "KubeAPIServerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "items": "Items contains the items", -} - -func (KubeAPIServerList) SwaggerDoc() map[string]string { - return map_KubeAPIServerList -} - -var map_KubeControllerManager = map[string]string{ - "": "KubeControllerManager provides information to configure an operator to manage kube-controller-manager.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec is the specification of the desired behavior of the Kubernetes Controller Manager", - "status": "status 
is the most recently observed status of the Kubernetes Controller Manager",
-}
-
-func (KubeControllerManager) SwaggerDoc() map[string]string {
- return map_KubeControllerManager
-}
-
-var map_KubeControllerManagerList = map[string]string{
- "": "KubeControllerManagerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
- "items": "Items contains the items",
-}
-
-func (KubeControllerManagerList) SwaggerDoc() map[string]string {
- return map_KubeControllerManagerList
-}
-
-var map_KubeControllerManagerSpec = map[string]string{
- "useMoreSecureServiceCA": "useMoreSecureServiceCA indicates that the service-ca.crt provided in SA token volumes should include only enough certificates to validate service serving certificates. Once set to true, it cannot be set to false. Even if someone finds a way to set it back to false, the service-ca.crt files that previously existed will only have the more secure content.",
-}
-
-func (KubeControllerManagerSpec) SwaggerDoc() map[string]string {
- return map_KubeControllerManagerSpec
-}
-
-var map_KubeStorageVersionMigrator = map[string]string{
- "": "KubeStorageVersionMigrator provides information to configure an operator to manage kube-storage-version-migrator.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
-}
-
-func (KubeStorageVersionMigrator) SwaggerDoc() map[string]string {
- return map_KubeStorageVersionMigrator
-}
-
-var map_KubeStorageVersionMigratorList = map[string]string{
- "": "KubeStorageVersionMigratorList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
- "items": "Items contains the items",
-}
-
-func (KubeStorageVersionMigratorList) SwaggerDoc() map[string]string {
- return map_KubeStorageVersionMigratorList
-}
-
-var map_AdditionalNetworkDefinition = map[string]string{
- "": "AdditionalNetworkDefinition configures an extra network that is available but not created by default. Instead, pods must request them by name. type must be specified, along with exactly one \"Config\" that matches the type.",
- "type": "type is the type of network. The supported values are NetworkTypeRaw, NetworkTypeSimpleMacvlan",
- "name": "name is the name of the network. This will be populated in the resulting CRD. This must be unique.",
- "namespace": "namespace is the namespace of the network. This will be populated in the resulting CRD. If not given the network will be created in the default namespace.",
- "rawCNIConfig": "rawCNIConfig is the raw CNI configuration json to create in the NetworkAttachmentDefinition CRD",
- "simpleMacvlanConfig": "SimpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan",
-}
-
-func (AdditionalNetworkDefinition) SwaggerDoc() map[string]string {
- return map_AdditionalNetworkDefinition
-}
-
-var map_ClusterNetworkEntry = map[string]string{
- "": "ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. If the HostPrefix field is not used by the plugin, it can be left unset.
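(For illustration: a ClusterNetworkEntry in the context of the NetworkSpec documented below, assuming the JSON field names cidr and hostPrefix; the CIDR values are conventional examples, not requirements.)

    apiVersion: operator.openshift.io/v1
    kind: Network
    metadata:
      name: cluster
    spec:
      clusterNetwork:           # pod IP pool(s)
      - cidr: 10.128.0.0/14
        hostPrefix: 23          # each node is allocated a /23 from the pool
      serviceNetwork:
      - 172.30.0.0/16
      defaultNetwork:
        type: OVNKubernetes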
Not all network providers support multiple ClusterNetworks",
-}
-
-func (ClusterNetworkEntry) SwaggerDoc() map[string]string {
- return map_ClusterNetworkEntry
-}
-
-var map_DefaultNetworkDefinition = map[string]string{
- "": "DefaultNetworkDefinition represents a single network plugin's configuration. type must be specified, along with exactly one \"Config\" that matches the type.",
- "type": "type is the type of network. All NetworkTypes are supported except for NetworkTypeRaw",
- "openshiftSDNConfig": "openShiftSDNConfig configures the openshift-sdn plugin",
- "ovnKubernetesConfig": "ovnKubernetesConfig configures the ovn-kubernetes plugin.",
- "kuryrConfig": "KuryrConfig configures the kuryr plugin",
-}
-
-func (DefaultNetworkDefinition) SwaggerDoc() map[string]string {
- return map_DefaultNetworkDefinition
-}
-
-var map_ExportNetworkFlows = map[string]string{
- "netFlow": "netFlow defines the NetFlow configuration.",
- "sFlow": "sFlow defines the SFlow configuration.",
- "ipfix": "ipfix defines IPFIX configuration.",
-}
-
-func (ExportNetworkFlows) SwaggerDoc() map[string]string {
- return map_ExportNetworkFlows
-}
-
-var map_GatewayConfig = map[string]string{
- "": "GatewayConfig holds node gateway-related parsed config file parameters and command-line overrides",
- "routingViaHost": "RoutingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port into the host before sending it out. If this is not set, traffic will always egress directly from OVN to outside without touching the host stack. Setting this to true means hardware offload will not be supported. Default is false if GatewayConfig is specified.",
-}
-
-func (GatewayConfig) SwaggerDoc() map[string]string {
- return map_GatewayConfig
-}
-
-var map_HybridOverlayConfig = map[string]string{
- "hybridClusterNetwork": "HybridClusterNetwork defines a network space given to nodes on an additional overlay network.",
- "hybridOverlayVXLANPort": "HybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. Default is 4789",
-}
-
-func (HybridOverlayConfig) SwaggerDoc() map[string]string {
- return map_HybridOverlayConfig
-}
-
-var map_IPAMConfig = map[string]string{
- "": "IPAMConfig contains configurations for IPAM (IP Address Management)",
- "type": "Type is the type of IPAM module that will be used for IP Address Management (IPAM). The supported values are IPAMTypeDHCP, IPAMTypeStatic",
- "staticIPAMConfig": "StaticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic",
-}
-
-func (IPAMConfig) SwaggerDoc() map[string]string {
- return map_IPAMConfig
-}
-
-var map_IPFIXConfig = map[string]string{
- "collectors": "ipfixCollectors is a list of strings formatted as ip:port with a maximum of ten items",
-}
-
-func (IPFIXConfig) SwaggerDoc() map[string]string {
- return map_IPFIXConfig
-}
-
-var map_KuryrConfig = map[string]string{
- "": "KuryrConfig configures the Kuryr-Kubernetes SDN",
- "daemonProbesPort": "The port on which kuryr-daemon will listen for readiness and liveness requests.",
- "controllerProbesPort": "The port on which kuryr-controller will listen for readiness and liveness requests.",
- "openStackServiceNetwork": "openStackServiceNetwork contains the CIDR of network from which to allocate IPs for OpenStack Octavia's Amphora VMs. Please note that with the Amphora driver Octavia uses two IPs from that network for each loadbalancer - one given by OpenShift and the second for VRRP connections.
As the first one is managed by OpenShift's IPAM and the second by Neutron's, those need to come from different pools. Therefore `openStackServiceNetwork` needs to be at least twice the size of `serviceNetwork`, and the whole `serviceNetwork` must overlap with `openStackServiceNetwork`. cluster-network-operator will then make sure VRRP IPs are taken from the ranges inside `openStackServiceNetwork` that are not overlapping with `serviceNetwork`, effectively preventing conflicts. If not set, cluster-network-operator will use `serviceNetwork` expanded by decrementing the prefix size by 1.",
- "enablePortPoolsPrepopulation": "enablePortPoolsPrepopulation when true will make Kuryr prepopulate each newly created port pool with a minimum number of ports. Kuryr uses Neutron port pooling to fight the fact that it takes a significant amount of time to create one. It creates a number of ports when the first pod that is configured to use the dedicated network for pods is created in a namespace, and keeps them ready to be attached to pods. Port prepopulation is disabled by default.",
- "poolMaxPorts": "poolMaxPorts sets a maximum number of free ports that are being kept in a port pool. If the number of ports exceeds this setting, free ports will get deleted. Setting 0 will disable this upper bound, effectively preventing pools from shrinking and this is the default value. For more information about port pools see enablePortPoolsPrepopulation setting.",
- "poolMinPorts": "poolMinPorts sets a minimum number of free ports that should be kept in a port pool. If the number of ports is lower than this setting, new ports will get created and added to pool. The default is 1. For more information about port pools see enablePortPoolsPrepopulation setting.",
- "poolBatchPorts": "poolBatchPorts sets a number of ports that should be created in a single batch request to extend the port pool. The default is 3. For more information about port pools see enablePortPoolsPrepopulation setting.",
- "mtu": "mtu is the MTU that Kuryr should use when creating pod networks in Neutron. The value has to be lower than or equal to the MTU of the nodes network and Neutron has to allow creation of tenant networks with such MTU. If unset Pod networks will be created with the same MTU as the nodes network has.",
-}
-
-func (KuryrConfig) SwaggerDoc() map[string]string {
- return map_KuryrConfig
-}
-
-var map_MTUMigration = map[string]string{
- "": "MTUMigration contains information about MTU migration.",
- "network": "network contains information about MTU migration for the default network. Migrations are only allowed to MTU values lower than the machine's uplink MTU by the minimum appropriate offset.",
- "machine": "machine contains MTU migration configuration for the machine's uplink. Needs to be migrated along with the default network MTU unless the current uplink MTU already accommodates the default network MTU.",
-}
-
-func (MTUMigration) SwaggerDoc() map[string]string {
- return map_MTUMigration
-}
-
-var map_MTUMigrationValues = map[string]string{
- "": "MTUMigrationValues contains the values for a MTU migration.",
- "to": "to is the MTU to migrate to.",
- "from": "from is the MTU to migrate from.",
-}
-
-func (MTUMigrationValues) SwaggerDoc() map[string]string {
- return map_MTUMigrationValues
-}
-
-var map_NetFlowConfig = map[string]string{
- "collectors": "netFlow defines the NetFlow collectors that will consume the flow data exported from OVS.
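(For illustration: the exportNetworkFlows field from NetworkSpec below, with a single NetFlow collector; the collector address is a hypothetical ip:port value.)

    spec:
      exportNetworkFlows:
        netFlow:
          collectors:
          - 192.0.2.10:2056   # up to ten ip:port entries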
It is a list of strings formatted as ip:port with a maximum of ten items",
-}
-
-func (NetFlowConfig) SwaggerDoc() map[string]string {
- return map_NetFlowConfig
-}
-
-var map_Network = map[string]string{
- "": "Network describes the cluster's desired network configuration. It is consumed by the cluster-network-operator.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
-}
-
-func (Network) SwaggerDoc() map[string]string {
- return map_Network
-}
-
-var map_NetworkList = map[string]string{
- "": "NetworkList contains a list of Network configurations\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
-}
-
-func (NetworkList) SwaggerDoc() map[string]string {
- return map_NetworkList
-}
-
-var map_NetworkMigration = map[string]string{
- "": "NetworkMigration represents the cluster network configuration.",
- "networkType": "networkType is the target type of network migration. Set this to the target network type to allow changing the default network. If unset, the operation of changing cluster default network plugin will be rejected. The supported values are OpenShiftSDN, OVNKubernetes",
- "mtu": "mtu contains the MTU migration configuration. Set this to allow changing the MTU values for the default network. If unset, the operation of changing the MTU for the default network will be rejected.",
-}
-
-func (NetworkMigration) SwaggerDoc() map[string]string {
- return map_NetworkMigration
-}
-
-var map_NetworkSpec = map[string]string{
- "": "NetworkSpec is the top-level network configuration object.",
- "clusterNetwork": "clusterNetwork is the IP address pool to use for pod IPs. Some network providers, e.g. OpenShift SDN, support multiple ClusterNetworks. Others only support one. This is equivalent to the cluster-cidr.",
- "serviceNetwork": "serviceNetwork is the IP address pool to use for Service IPs. Currently, all existing network providers only support a single value here, but this is an array to allow for growth.",
- "defaultNetwork": "defaultNetwork is the \"default\" network that all pods will receive",
- "additionalNetworks": "additionalNetworks is a list of extra networks to make available to pods when multiple networks are enabled.",
- "disableMultiNetwork": "disableMultiNetwork specifies whether or not multiple pod network support should be disabled. If unset, this property defaults to 'false' and multiple network support is enabled.",
- "useMultiNetworkPolicy": "useMultiNetworkPolicy enables a controller which allows for MultiNetworkPolicy objects to be used on additional networks as created by Multus CNI. MultiNetworkPolicy are similar to NetworkPolicy objects, but NetworkPolicy objects only apply to the primary interface. With MultiNetworkPolicy, you can control the traffic that a pod can receive over the secondary interfaces. If unset, this property defaults to 'false' and MultiNetworkPolicy objects are ignored. If 'disableMultiNetwork' is 'true' then the value of this field is ignored.",
- "deployKubeProxy": "deployKubeProxy specifies whether or not a standalone kube-proxy should be deployed by the operator. Some network providers include kube-proxy or similar functionality.
If unset, the plugin will attempt to select the correct value, which is false when OpenShift SDN and ovn-kubernetes are used and true otherwise.",
- "disableNetworkDiagnostics": "disableNetworkDiagnostics specifies whether or not PodNetworkConnectivityCheck CRs from a test pod to every node, apiserver and LB should be disabled or not. If unset, this property defaults to 'false' and network diagnostics is enabled. Setting this to 'true' would reduce the additional load of the pods performing the checks.",
- "kubeProxyConfig": "kubeProxyConfig lets us configure desired proxy configuration. If not specified, sensible defaults will be chosen by OpenShift directly. Not consumed by all network providers - currently only openshift-sdn.",
- "exportNetworkFlows": "exportNetworkFlows enables and configures the export of network flow metadata from the pod network by using protocols NetFlow, SFlow or IPFIX. Currently only supported on OVN-Kubernetes plugin. If unset, flows will not be exported to any collector.",
- "migration": "migration enables and configures the cluster network migration. The migration procedure allows changing the network type and the MTU.",
-}
-
-func (NetworkSpec) SwaggerDoc() map[string]string {
- return map_NetworkSpec
-}
-
-var map_NetworkStatus = map[string]string{
- "": "NetworkStatus is detailed operator status, which is distilled up to the Network clusteroperator object.",
-}
-
-func (NetworkStatus) SwaggerDoc() map[string]string {
- return map_NetworkStatus
-}
-
-var map_OVNKubernetesConfig = map[string]string{
- "": "ovnKubernetesConfig contains the configuration parameters for networks using the ovn-kubernetes network project",
- "mtu": "mtu is the MTU to use for the tunnel interface. This must be 100 bytes smaller than the uplink mtu. Default is 1400",
- "genevePort": "genevePort is the UDP port to be used by geneve encapsulation. Default is 6081",
- "hybridOverlayConfig": "HybridOverlayConfig configures an additional overlay network for peers that are not using OVN.",
- "ipsecConfig": "ipsecConfig enables and configures IPsec for pods on the pod network within the cluster.",
- "policyAuditConfig": "policyAuditConfig is the configuration for network policy audit events. If unset, reported defaults are used.",
- "gatewayConfig": "gatewayConfig holds the configuration for node gateway options.",
-}
-
-func (OVNKubernetesConfig) SwaggerDoc() map[string]string {
- return map_OVNKubernetesConfig
-}
-
-var map_OpenShiftSDNConfig = map[string]string{
- "": "OpenShiftSDNConfig configures the three openshift-sdn plugins",
- "mode": "mode is one of \"Multitenant\", \"Subnet\", or \"NetworkPolicy\"",
- "vxlanPort": "vxlanPort is the port to use for all vxlan packets. The default is 4789.",
- "mtu": "mtu is the mtu to use for the tunnel interface. Defaults to 1450 if unset. This must be 50 bytes smaller than the machine's uplink.",
- "useExternalOpenvswitch": "useExternalOpenvswitch used to control whether the operator would deploy an OVS DaemonSet itself or expect someone else to start OVS. As of 4.6, OVS is always run as a system service, and this flag is ignored. DEPRECATED: non-functional as of 4.6",
- "enableUnidling": "enableUnidling controls whether or not the service proxy will support idling and unidling of services.
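(For illustration: an openshiftSDNConfig using the documented mode values, with the documented defaults made explicit.)

    spec:
      defaultNetwork:
        type: OpenShiftSDN
        openshiftSDNConfig:
          mode: NetworkPolicy   # or Multitenant, Subnet
          vxlanPort: 4789       # documented default
          mtu: 1450             # must be 50 bytes below the machine's uplink MTU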
By default, unidling is enabled.",
-}
-
-func (OpenShiftSDNConfig) SwaggerDoc() map[string]string {
- return map_OpenShiftSDNConfig
-}
-
-var map_PolicyAuditConfig = map[string]string{
- "rateLimit": "rateLimit is the approximate maximum number of messages to generate per-second per-node. If unset the default of 20 msg/sec is used.",
- "maxFileSize": "maxFileSize is the max size an ACL_audit log file is allowed to reach before rotation occurs. Units are in MB and the default is 50MB",
- "destination": "destination is the location for policy log messages. Regardless of this config, persistent logs will always be dumped to the host at /var/log/ovn/; additionally, syslog output may be configured as follows. Valid values are: - \"libc\" -> to use the libc syslog() function of the host node's journald process - \"udp:host:port\" -> for sending syslog over UDP - \"unix:file\" -> for using the UNIX domain socket directly - \"null\" -> to discard all messages logged to syslog The default is \"null\"",
- "syslogFacility": "syslogFacility is the RFC5424 facility for generated messages, e.g. \"kern\". Default is \"local0\"",
-}
-
-func (PolicyAuditConfig) SwaggerDoc() map[string]string {
- return map_PolicyAuditConfig
-}
-
-var map_ProxyConfig = map[string]string{
- "": "ProxyConfig defines the configuration knobs for kubeproxy. All of these are optional and have sensible defaults",
- "iptablesSyncPeriod": "An internal kube-proxy parameter. In older releases of OCP, this sometimes needed to be adjusted in large clusters for performance reasons, but this is no longer necessary, and there is no reason to change this from the default value. Default: 30s",
- "bindAddress": "The address to \"bind\" on. Defaults to 0.0.0.0",
- "proxyArguments": "Any additional arguments to pass to the kubeproxy process",
-}
-
-func (ProxyConfig) SwaggerDoc() map[string]string {
- return map_ProxyConfig
-}
-
-var map_SFlowConfig = map[string]string{
- "collectors": "sFlowCollectors is a list of strings formatted as ip:port with a maximum of ten items",
-}
-
-func (SFlowConfig) SwaggerDoc() map[string]string {
- return map_SFlowConfig
-}
-
-var map_SimpleMacvlanConfig = map[string]string{
- "": "SimpleMacvlanConfig contains configurations for macvlan interface.",
- "master": "master is the host interface to create the macvlan interface from. If not specified, it will be the default route interface",
- "ipamConfig": "IPAMConfig configures the IPAM module that will be used for IP Address Management (IPAM).",
- "mode": "mode is the macvlan mode: bridge, private, vepa, passthru. The default is bridge",
- "mtu": "mtu is the mtu to use for the macvlan interface.
If unset, the host's kernel will select the value.",
-}
-
-func (SimpleMacvlanConfig) SwaggerDoc() map[string]string {
- return map_SimpleMacvlanConfig
-}
-
-var map_StaticIPAMAddresses = map[string]string{
- "": "StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses",
- "address": "Address is the IP address in CIDR format",
- "gateway": "Gateway is the IP inside the subnet to designate as the gateway",
-}
-
-func (StaticIPAMAddresses) SwaggerDoc() map[string]string {
- return map_StaticIPAMAddresses
-}
-
-var map_StaticIPAMConfig = map[string]string{
- "": "StaticIPAMConfig contains configurations for static IPAM (IP Address Management)",
- "addresses": "Addresses configures IP addresses for the interface",
- "routes": "Routes configures IP routes for the interface",
- "dns": "DNS configures DNS for the interface",
-}
-
-func (StaticIPAMConfig) SwaggerDoc() map[string]string {
- return map_StaticIPAMConfig
-}
-
-var map_StaticIPAMDNS = map[string]string{
- "": "StaticIPAMDNS provides DNS related information for static IPAM",
- "nameservers": "Nameservers points to DNS servers for IP lookup",
- "domain": "Domain configures the domain name of the local domain used for short hostname lookups",
- "search": "Search configures priority ordered search domains for short hostname lookups",
-}
-
-func (StaticIPAMDNS) SwaggerDoc() map[string]string {
- return map_StaticIPAMDNS
-}
-
-var map_StaticIPAMRoutes = map[string]string{
- "": "StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes",
- "destination": "Destination points the IP route destination",
- "gateway": "Gateway is the route's next-hop IP address. If unset, a default gateway is assumed (as determined by the CNI plugin).",
-}
-
-func (StaticIPAMRoutes) SwaggerDoc() map[string]string {
- return map_StaticIPAMRoutes
-}
-
-var map_OpenShiftAPIServer = map[string]string{
- "": "OpenShiftAPIServer provides information to configure an operator to manage openshift-apiserver.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
- "spec": "spec is the specification of the desired behavior of the OpenShift API Server.",
- "status": "status defines the observed status of the OpenShift API Server.",
-}
-
-func (OpenShiftAPIServer) SwaggerDoc() map[string]string {
- return map_OpenShiftAPIServer
-}
-
-var map_OpenShiftAPIServerList = map[string]string{
- "": "OpenShiftAPIServerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
- "items": "Items contains the items",
-}
-
-func (OpenShiftAPIServerList) SwaggerDoc() map[string]string {
- return map_OpenShiftAPIServerList
-}
-
-var map_OpenShiftAPIServerStatus = map[string]string{
- "latestAvailableRevision": "latestAvailableRevision is the latest revision used as a suffix of revisioned secrets like encryption-config.
A new revision causes a new deployment of pods.", -} - -func (OpenShiftAPIServerStatus) SwaggerDoc() map[string]string { - return map_OpenShiftAPIServerStatus -} - -var map_OpenShiftControllerManager = map[string]string{ - "": "OpenShiftControllerManager provides information to configure an operator to manage openshift-controller-manager.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", -} - -func (OpenShiftControllerManager) SwaggerDoc() map[string]string { - return map_OpenShiftControllerManager -} - -var map_OpenShiftControllerManagerList = map[string]string{ - "": "OpenShiftControllerManagerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "items": "Items contains the items", -} - -func (OpenShiftControllerManagerList) SwaggerDoc() map[string]string { - return map_OpenShiftControllerManagerList -} - -var map_KubeScheduler = map[string]string{ - "": "KubeScheduler provides information to configure an operator to manage scheduler.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec is the specification of the desired behavior of the Kubernetes Scheduler", - "status": "status is the most recently observed status of the Kubernetes Scheduler", -} - -func (KubeScheduler) SwaggerDoc() map[string]string { - return map_KubeScheduler -} - -var map_KubeSchedulerList = map[string]string{ - "": "KubeSchedulerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "items": "Items contains the items", -} - -func (KubeSchedulerList) SwaggerDoc() map[string]string { - return map_KubeSchedulerList -} - -var map_ServiceCA = map[string]string{ - "": "ServiceCA provides information to configure an operator to manage the service cert controllers\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. 
They may not be overridden.", -} - -func (ServiceCA) SwaggerDoc() map[string]string { - return map_ServiceCA -} - -var map_ServiceCAList = map[string]string{ - "": "ServiceCAList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "items": "Items contains the items", -} - -func (ServiceCAList) SwaggerDoc() map[string]string { - return map_ServiceCAList -} - -var map_ServiceCatalogAPIServer = map[string]string{ - "": "ServiceCatalogAPIServer provides information to configure an operator to manage Service Catalog API Server DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", -} - -func (ServiceCatalogAPIServer) SwaggerDoc() map[string]string { - return map_ServiceCatalogAPIServer -} - -var map_ServiceCatalogAPIServerList = map[string]string{ - "": "ServiceCatalogAPIServerList is a collection of items DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "items": "Items contains the items", -} - -func (ServiceCatalogAPIServerList) SwaggerDoc() map[string]string { - return map_ServiceCatalogAPIServerList -} - -var map_ServiceCatalogControllerManager = map[string]string{ - "": "ServiceCatalogControllerManager provides information to configure an operator to manage Service Catalog Controller Manager DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", -} - -func (ServiceCatalogControllerManager) SwaggerDoc() map[string]string { - return map_ServiceCatalogControllerManager -} - -var map_ServiceCatalogControllerManagerList = map[string]string{ - "": "ServiceCatalogControllerManagerList is a collection of items DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "items": "Items contains the items", -} - -func (ServiceCatalogControllerManagerList) SwaggerDoc() map[string]string { - return map_ServiceCatalogControllerManagerList -} - -var map_Storage = map[string]string{ - "": "Storage provides a means to configure an operator to manage the cluster storage operator. `cluster` is the canonical name.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. 
They may not be overridden.", -} - -func (Storage) SwaggerDoc() map[string]string { - return map_Storage -} - -var map_StorageList = map[string]string{ - "": "StorageList contains a list of Storages.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", -} - -func (StorageList) SwaggerDoc() map[string]string { - return map_StorageList -} - -var map_StorageSpec = map[string]string{ - "": "StorageSpec is the specification of the desired behavior of the cluster storage operator.", -} - -func (StorageSpec) SwaggerDoc() map[string]string { - return map_StorageSpec -} - -var map_StorageStatus = map[string]string{ - "": "StorageStatus defines the observed status of the cluster storage operator.", -} - -func (StorageStatus) SwaggerDoc() map[string]string { - return map_StorageStatus -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/custom-resource-status/LICENSE b/vendor/github.com/openshift/custom-resource-status/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/vendor/github.com/openshift/custom-resource-status/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/openshift/custom-resource-status/objectreferences/v1/_doc.go b/vendor/github.com/openshift/custom-resource-status/objectreferences/v1/_doc.go deleted file mode 100644 index ce7055f09..000000000 --- a/vendor/github.com/openshift/custom-resource-status/objectreferences/v1/_doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// +k8s:deepcopy-gen=package,register -// +k8s:defaulter-gen=TypeMeta -// +k8s:openapi-gen=true - -// Package v1 provides version v1 of the functions necessary to -// manage and inspect a slice of object references. This can be -// used to add a RelatedObjects field on the status of your custom -// resource, adding objects that your operator manages to the status. 
-package v1 diff --git a/vendor/github.com/openshift/custom-resource-status/objectreferences/v1/objectreferences.go b/vendor/github.com/openshift/custom-resource-status/objectreferences/v1/objectreferences.go deleted file mode 100644 index c0af5f1cc..000000000 --- a/vendor/github.com/openshift/custom-resource-status/objectreferences/v1/objectreferences.go +++ /dev/null @@ -1,108 +0,0 @@ -package v1 - -import ( - "errors" - - corev1 "k8s.io/api/core/v1" -) - -var errMinObjectRef = errors.New("object reference must have, at a minimum: apiVersion, kind, and name") - -// SetObjectReference - updates list of object references based on newObject -func SetObjectReference(objects *[]corev1.ObjectReference, newObject corev1.ObjectReference) error { - if !minObjectReference(newObject) { - return errMinObjectRef - } - - if objects == nil { - objects = &[]corev1.ObjectReference{} - } - existingObject, err := FindObjectReference(*objects, newObject) - if err != nil { - return err - } - if existingObject == nil { // add it to the slice - *objects = append(*objects, newObject) - } else { // update found reference - *existingObject = newObject - } - return nil -} - -// RemoveObjectReference - updates list of object references to remove rmObject -func RemoveObjectReference(objects *[]corev1.ObjectReference, rmObject corev1.ObjectReference) error { - if !minObjectReference(rmObject) { - return errMinObjectRef - } - - if objects == nil { - return nil - } - newObjectReferences := []corev1.ObjectReference{} - // TODO: this is incredibly inefficient. If the performance hit becomes a - // problem this should be improved. - for _, object := range *objects { - if !ObjectReferenceEqual(object, rmObject) { - newObjectReferences = append(newObjectReferences, object) - } - } - - *objects = newObjectReferences - return nil -} - -// FindObjectReference - finds the first ObjectReference in a slice of objects -// matching find. 
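For reference, a minimal sketch of how a status controller consumed the two mutators above before this removal; the Deployment reference and namespace are illustrative, not taken from this repository:

```go
package main

import (
	"fmt"

	objectreferencesv1 "github.com/openshift/custom-resource-status/objectreferences/v1"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Slice an operator would persist as .status.relatedObjects.
	related := []corev1.ObjectReference{}

	ref := corev1.ObjectReference{
		APIVersion: "apps/v1",
		Kind:       "Deployment",
		Name:       "controller",        // illustrative name
		Namespace:  "example-namespace", // illustrative namespace
	}

	// Adds the reference, or updates the matching entry in place.
	if err := objectreferencesv1.SetObjectReference(&related, ref); err != nil {
		fmt.Println(err) // returned when apiVersion, kind, or name is missing
	}

	// Drops the reference again once the managed object goes away.
	_ = objectreferencesv1.RemoveObjectReference(&related, ref)
	fmt.Println(len(related)) // 0
}
```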
-func FindObjectReference(objects []corev1.ObjectReference, find corev1.ObjectReference) (*corev1.ObjectReference, error) { - if !minObjectReference(find) { - return nil, errMinObjectRef - } - - for i := range objects { - if ObjectReferenceEqual(find, objects[i]) { - return &objects[i], nil - } - } - - return nil, nil -} - -// ObjectReferenceEqual - compares gotRef to expectedRef -// preference order: APIVersion, Kind, Name, and Namespace -// if either gotRef or expectedRef fails the minObjectReference test, this function -// will simply return false -func ObjectReferenceEqual(gotRef, expectedRef corev1.ObjectReference) bool { - if !minObjectReference(gotRef) || !minObjectReference(expectedRef) { - return false - } - if gotRef.APIVersion != expectedRef.APIVersion { - return false - } - if gotRef.Kind != expectedRef.Kind { - return false - } - if gotRef.Name != expectedRef.Name { - return false - } - if expectedRef.Namespace != "" && (gotRef.Namespace != expectedRef.Namespace) { - return false - } - return true -} - -// in order to have any meaningful semantics on this we need to -// ensure that some minimal amount of information is provided in -// the object reference -func minObjectReference(objRef corev1.ObjectReference) bool { - if objRef.APIVersion == "" { - return false - } - if objRef.Kind == "" { - return false - } - if objRef.Name == "" { - return false - } - - return true -} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/admissionregistration.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/admissionregistration.go deleted file mode 100644 index 2fcfd1394..000000000 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/admissionregistration.go +++ /dev/null @@ -1,51 +0,0 @@ -package resourcemerge - -import ( - operatorsv1 "github.com/openshift/api/operator/v1" - admissionregistrationv1 "k8s.io/api/admissionregistration/v1" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// ExpectedMutatingWebhooksConfiguration returns the last applied generation for the MutatingWebhookConfiguration resource registered in the operator -func ExpectedMutatingWebhooksConfiguration(name string, previousGenerations []operatorsv1.GenerationStatus) int64 { - generation := GenerationFor(previousGenerations, schema.GroupResource{Group: admissionregistrationv1.SchemeGroupVersion.Group, Resource: "mutatingwebhookconfigurations"}, "", name) - if generation != nil { - return generation.LastGeneration - } - return -1 -} - -// SetMutatingWebhooksConfigurationGeneration updates the operator generation status list with the last applied generation for the provided MutatingWebhookConfiguration resource -func SetMutatingWebhooksConfigurationGeneration(generations *[]operatorsv1.GenerationStatus, actual *admissionregistrationv1.MutatingWebhookConfiguration) { - if actual == nil { - return - } - SetGeneration(generations, operatorsv1.GenerationStatus{ - Group: admissionregistrationv1.SchemeGroupVersion.Group, - Resource: "mutatingwebhookconfigurations", - Name: actual.Name, - LastGeneration: actual.ObjectMeta.Generation, - }) -} - -// ExpectedValidatingWebhooksConfiguration returns the last applied generation for the ValidatingWebhookConfiguration resource registered in the operator -func ExpectedValidatingWebhooksConfiguration(name string, previousGenerations []operatorsv1.GenerationStatus) int64 { - generation := GenerationFor(previousGenerations, schema.GroupResource{Group: admissionregistrationv1.SchemeGroupVersion.Group, Resource: 
"validatingwebhookconfigurations"}, "", name) - if generation != nil { - return generation.LastGeneration - } - return -1 -} - -// SetValidatingWebhooksConfigurationGeneration updates operator generation status list with last applied generation for provided ValidatingWebhookConfiguration resource -func SetValidatingWebhooksConfigurationGeneration(generations *[]operatorsv1.GenerationStatus, actual *admissionregistrationv1.ValidatingWebhookConfiguration) { - if actual == nil { - return - } - SetGeneration(generations, operatorsv1.GenerationStatus{ - Group: admissionregistrationv1.SchemeGroupVersion.Group, - Resource: "validatingwebhookconfigurations", - Name: actual.Name, - LastGeneration: actual.ObjectMeta.Generation, - }) -} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apiextensions.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apiextensions.go deleted file mode 100644 index 754a5aabe..000000000 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apiextensions.go +++ /dev/null @@ -1,68 +0,0 @@ -package resourcemerge - -import ( - "strings" - - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - "k8s.io/apimachinery/pkg/api/equality" - utilpointer "k8s.io/utils/pointer" -) - -// EnsureCustomResourceDefinitionV1Beta1 ensures that the existing matches the required. -// modified is set to true when existing had to be updated with required. -func EnsureCustomResourceDefinitionV1Beta1(modified *bool, existing *apiextensionsv1beta1.CustomResourceDefinition, required apiextensionsv1beta1.CustomResourceDefinition) { - EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) - - // we stomp everything - if !equality.Semantic.DeepEqual(existing.Spec, required.Spec) { - *modified = true - existing.Spec = required.Spec - } -} - -// EnsureCustomResourceDefinitionV1 ensures that the existing matches the required. -// modified is set to true when existing had to be updated with required. 
-func EnsureCustomResourceDefinitionV1(modified *bool, existing *apiextensionsv1.CustomResourceDefinition, required apiextensionsv1.CustomResourceDefinition) { - EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) - - // we need to match defaults - mimicCRDV1Defaulting(&required) - // we stomp everything - if !equality.Semantic.DeepEqual(existing.Spec, required.Spec) { - *modified = true - existing.Spec = required.Spec - } -} - -func mimicCRDV1Defaulting(required *apiextensionsv1.CustomResourceDefinition) { - crd_SetDefaults_CustomResourceDefinitionSpec(&required.Spec) - - if required.Spec.Conversion != nil && - required.Spec.Conversion.Webhook != nil && - required.Spec.Conversion.Webhook.ClientConfig != nil && - required.Spec.Conversion.Webhook.ClientConfig.Service != nil { - crd_SetDefaults_ServiceReference(required.Spec.Conversion.Webhook.ClientConfig.Service) - } -} - -// lifted from https://github.com/kubernetes/kubernetes/blob/v1.21.0/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/defaults.go#L42-L61 -func crd_SetDefaults_CustomResourceDefinitionSpec(obj *apiextensionsv1.CustomResourceDefinitionSpec) { - if len(obj.Names.Singular) == 0 { - obj.Names.Singular = strings.ToLower(obj.Names.Kind) - } - if len(obj.Names.ListKind) == 0 && len(obj.Names.Kind) > 0 { - obj.Names.ListKind = obj.Names.Kind + "List" - } - if obj.Conversion == nil { - obj.Conversion = &apiextensionsv1.CustomResourceConversion{ - Strategy: apiextensionsv1.NoneConverter, - } - } -} - -func crd_SetDefaults_ServiceReference(obj *apiextensionsv1.ServiceReference) { - if obj.Port == nil { - obj.Port = utilpointer.Int32Ptr(443) - } -} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apps.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apps.go deleted file mode 100644 index 1731382e6..000000000 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apps.go +++ /dev/null @@ -1,80 +0,0 @@ -package resourcemerge - -import ( - appsv1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - - operatorsv1 "github.com/openshift/api/operator/v1" -) - -func GenerationFor(generations []operatorsv1.GenerationStatus, resource schema.GroupResource, namespace, name string) *operatorsv1.GenerationStatus { - for i := range generations { - curr := &generations[i] - if curr.Namespace == namespace && - curr.Name == name && - curr.Group == resource.Group && - curr.Resource == resource.Resource { - - return curr - } - } - - return nil -} - -func SetGeneration(generations *[]operatorsv1.GenerationStatus, newGeneration operatorsv1.GenerationStatus) { - if generations == nil { - generations = &[]operatorsv1.GenerationStatus{} - } - - existingGeneration := GenerationFor(*generations, schema.GroupResource{Group: newGeneration.Group, Resource: newGeneration.Resource}, newGeneration.Namespace, newGeneration.Name) - if existingGeneration == nil { - *generations = append(*generations, newGeneration) - return - } - - existingGeneration.LastGeneration = newGeneration.LastGeneration - existingGeneration.Hash = newGeneration.Hash -} - -func ExpectedDeploymentGeneration(required *appsv1.Deployment, previousGenerations []operatorsv1.GenerationStatus) int64 { - generation := GenerationFor(previousGenerations, schema.GroupResource{Group: "apps", Resource: "deployments"}, required.Namespace, required.Name) - if generation != nil { - return generation.LastGeneration - } - return -1 -} - -func 
SetDeploymentGeneration(generations *[]operatorsv1.GenerationStatus, actual *appsv1.Deployment) { - if actual == nil { - return - } - SetGeneration(generations, operatorsv1.GenerationStatus{ - Group: "apps", - Resource: "deployments", - Namespace: actual.Namespace, - Name: actual.Name, - LastGeneration: actual.ObjectMeta.Generation, - }) -} - -func ExpectedDaemonSetGeneration(required *appsv1.DaemonSet, previousGenerations []operatorsv1.GenerationStatus) int64 { - generation := GenerationFor(previousGenerations, schema.GroupResource{Group: "apps", Resource: "daemonsets"}, required.Namespace, required.Name) - if generation != nil { - return generation.LastGeneration - } - return -1 -} - -func SetDaemonSetGeneration(generations *[]operatorsv1.GenerationStatus, actual *appsv1.DaemonSet) { - if actual == nil { - return - } - SetGeneration(generations, operatorsv1.GenerationStatus{ - Group: "apps", - Resource: "daemonsets", - Namespace: actual.Namespace, - Name: actual.Name, - LastGeneration: actual.ObjectMeta.Generation, - }) -} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go deleted file mode 100644 index f1e6d0c9f..000000000 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go +++ /dev/null @@ -1,271 +0,0 @@ -package resourcemerge - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" - "strings" - - "k8s.io/klog/v2" - "sigs.k8s.io/yaml" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - kyaml "k8s.io/apimachinery/pkg/util/yaml" -) - -// MergeConfigMap takes a configmap, the target key, special overlay funcs, and a list of configs to overlay on top of each other. -// It returns the resultant configmap and a bool indicating if any changes were made to the configmap. -func MergeConfigMap(configMap *corev1.ConfigMap, configKey string, specialCases map[string]MergeFunc, configYAMLs ...[]byte) (*corev1.ConfigMap, bool, error) { - return MergePrunedConfigMap(nil, configMap, configKey, specialCases, configYAMLs...) -} - -// MergePrunedConfigMap takes a configmap, the target key, special overlay funcs, and a list of configs to overlay on top of each other. -// It returns the resultant configmap and a bool indicating if any changes were made to the configmap. -// It roundtrips the config through the given schema. -func MergePrunedConfigMap(schema runtime.Object, configMap *corev1.ConfigMap, configKey string, specialCases map[string]MergeFunc, configYAMLs ...[]byte) (*corev1.ConfigMap, bool, error) { - configBytes, err := MergePrunedProcessConfig(schema, specialCases, configYAMLs...) 
- if err != nil { - return nil, false, err - } - - if reflect.DeepEqual(configMap.Data[configKey], configBytes) { - return configMap, false, nil - } - - ret := configMap.DeepCopy() - ret.Data[configKey] = string(configBytes) - - return ret, true, nil -} - -// MergeProcessConfig merges a series of config yaml files together with each later one overlaying all previous -func MergeProcessConfig(specialCases map[string]MergeFunc, configYAMLs ...[]byte) ([]byte, error) { - currentConfigYAML := configYAMLs[0] - - for _, currConfigYAML := range configYAMLs[1:] { - prevConfigJSON, err := kyaml.ToJSON(currentConfigYAML) - if err != nil { - klog.Warning(err) - // maybe it's just json - prevConfigJSON = currentConfigYAML - } - prevConfig := map[string]interface{}{} - if err := json.NewDecoder(bytes.NewBuffer(prevConfigJSON)).Decode(&prevConfig); err != nil { - return nil, err - } - - if len(currConfigYAML) > 0 { - currConfigJSON, err := kyaml.ToJSON(currConfigYAML) - if err != nil { - klog.Warning(err) - // maybe it's just json - currConfigJSON = currConfigYAML - } - currConfig := map[string]interface{}{} - if err := json.NewDecoder(bytes.NewBuffer(currConfigJSON)).Decode(&currConfig); err != nil { - return nil, err - } - - // protected against mismatched typemeta - prevAPIVersion, _, _ := unstructured.NestedString(prevConfig, "apiVersion") - prevKind, _, _ := unstructured.NestedString(prevConfig, "kind") - currAPIVersion, _, _ := unstructured.NestedString(currConfig, "apiVersion") - currKind, _, _ := unstructured.NestedString(currConfig, "kind") - currGVKSet := len(currAPIVersion) > 0 || len(currKind) > 0 - gvkMismatched := currAPIVersion != prevAPIVersion || currKind != prevKind - if currGVKSet && gvkMismatched { - return nil, fmt.Errorf("%v/%v does not equal %v/%v", currAPIVersion, currKind, prevAPIVersion, prevKind) - } - - if err := mergeConfig(prevConfig, currConfig, "", specialCases); err != nil { - return nil, err - } - } - - currentConfigYAML, err = runtime.Encode(unstructured.UnstructuredJSONScheme, &unstructured.Unstructured{Object: prevConfig}) - if err != nil { - return nil, err - } - } - - return currentConfigYAML, nil -} - -// MergePrunedProcessConfig merges a series of config yaml files together with each later one overlaying all previous. -// The result is roundtripped through the given schema if it is non-nil. -func MergePrunedProcessConfig(schema runtime.Object, specialCases map[string]MergeFunc, configYAMLs ...[]byte) ([]byte, error) { - bs, err := MergeProcessConfig(specialCases, configYAMLs...) 
- if err != nil { - return nil, err - } - - if schema == nil { - return bs, nil - } - - // roundtrip through the schema - typed := schema.DeepCopyObject() - if err := yaml.Unmarshal(bs, typed); err != nil { - return nil, err - } - typedBytes, err := json.Marshal(typed) - if err != nil { - return nil, err - } - var untypedJSON map[string]interface{} - if err := json.Unmarshal(typedBytes, &untypedJSON); err != nil { - return nil, err - } - - // and intersect output with input because we cannot rely on omitempty in the schema - inputBytes, err := yaml.YAMLToJSON(bs) - if err != nil { - return nil, err - } - var inputJSON map[string]interface{} - if err := json.Unmarshal(inputBytes, &inputJSON); err != nil { - return nil, err - } - return json.Marshal(intersectJSON(inputJSON, untypedJSON)) -} - -type MergeFunc func(dst, src interface{}, currentPath string) (interface{}, error) - -var _ MergeFunc = RemoveConfig - -// RemoveConfig is a merge func that eliminates an entire path from the config -func RemoveConfig(dst, src interface{}, currentPath string) (interface{}, error) { - return dst, nil -} - -// mergeConfig overwrites entries in curr by additional. It modifies curr. -func mergeConfig(curr, additional map[string]interface{}, currentPath string, specialCases map[string]MergeFunc) error { - for additionalKey, additionalVal := range additional { - fullKey := currentPath + "." + additionalKey - specialCase, ok := specialCases[fullKey] - if ok { - var err error - curr[additionalKey], err = specialCase(curr[additionalKey], additionalVal, currentPath) - if err != nil { - return err - } - continue - } - - currVal, ok := curr[additionalKey] - if !ok { - curr[additionalKey] = additionalVal - continue - } - - // only some scalars are accepted - switch castVal := additionalVal.(type) { - case map[string]interface{}: - currValAsMap, ok := currVal.(map[string]interface{}) - if !ok { - currValAsMap = map[string]interface{}{} - curr[additionalKey] = currValAsMap - } - - err := mergeConfig(currValAsMap, castVal, fullKey, specialCases) - if err != nil { - return err - } - continue - - default: - if err := unstructured.SetNestedField(curr, castVal, additionalKey); err != nil { - return err - } - } - - } - - return nil -} - -// intersectJSON returns the intersection of both JSON objects, -// preferring the values of the first argument. -func intersectJSON(x1, x2 map[string]interface{}) map[string]interface{} { - if x1 == nil || x2 == nil { - return nil - } - ret := map[string]interface{}{} - for k, v1 := range x1 { - v2, ok := x2[k] - if !ok { - continue - } - ret[k] = intersectValue(v1, v2) - } - return ret -} - -func intersectArray(x1, x2 []interface{}) []interface{} { - if x1 == nil || x2 == nil { - return nil - } - ret := make([]interface{}, 0, len(x1)) - for i := range x1 { - if i >= len(x2) { - break - } - ret = append(ret, intersectValue(x1[i], x2[i])) - } - return ret -} - -func intersectValue(x1, x2 interface{}) interface{} { - switch x1 := x1.(type) { - case map[string]interface{}: - x2, ok := x2.(map[string]interface{}) - if !ok { - return x1 - } - return intersectJSON(x1, x2) - case []interface{}: - x2, ok := x2.([]interface{}) - if !ok { - return x1 - } - return intersectArray(x1, x2) - default: - return x1 - } -} - -// IsRequiredConfigPresent can check an observedConfig to see if certain required paths are present in that config. -// This allows operators to require certain configuration to be observed before proceeding to honor a configuration or roll it out. 
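Putting the overlay loop and the special-case hook together, a sketch of a two-layer merge; the YAML snippets and the ".auditConfig" path are invented for illustration (at the top level, currentPath is empty, so special-case keys take the form ".fieldName"):

```go
package sketch

import (
	"fmt"

	"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
)

func mergeExample() error {
	base := []byte(`
servingInfo:
  bindAddress: 0.0.0.0:6443
auditConfig:
  enabled: true
`)
	overlay := []byte(`
servingInfo:
  bindAddress: 0.0.0.0:8443
auditConfig:
  enabled: false
`)

	// Later YAMLs win, so bindAddress ends up 0.0.0.0:8443. The special case
	// routes ".auditConfig" through RemoveConfig, which returns the existing
	// value and ignores the overlay for that path.
	merged, err := resourcemerge.MergeProcessConfig(
		map[string]resourcemerge.MergeFunc{".auditConfig": resourcemerge.RemoveConfig},
		base, overlay,
	)
	if err != nil {
		return err
	}
	fmt.Println(string(merged)) // merged config, JSON-encoded
	return nil
}
```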
-func IsRequiredConfigPresent(config []byte, requiredPaths [][]string) error { - if len(config) == 0 { - return fmt.Errorf("no observedConfig") - } - - existingConfig := map[string]interface{}{} - if err := json.NewDecoder(bytes.NewBuffer(config)).Decode(&existingConfig); err != nil { - return fmt.Errorf("error parsing config, %v", err) - } - - for _, requiredPath := range requiredPaths { - configVal, found, err := unstructured.NestedFieldNoCopy(existingConfig, requiredPath...) - if err != nil { - return fmt.Errorf("error reading %v from config, %v", strings.Join(requiredPath, "."), err) - } - if !found { - return fmt.Errorf("%v missing from config", strings.Join(requiredPath, ".")) - } - if configVal == nil { - return fmt.Errorf("%v null in config", strings.Join(requiredPath, ".")) - } - if configValSlice, ok := configVal.([]interface{}); ok && len(configValSlice) == 0 { - return fmt.Errorf("%v empty in config", strings.Join(requiredPath, ".")) - } - if configValString, ok := configVal.(string); ok && len(configValString) == 0 { - return fmt.Errorf("%v empty in config", strings.Join(requiredPath, ".")) - } - } - return nil -} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go deleted file mode 100644 index 4881c4b8a..000000000 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go +++ /dev/null @@ -1,277 +0,0 @@ -package resourcemerge - -import ( - "reflect" - "strings" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// EnsureObjectMeta writes namespace, name, labels, and annotations. Don't set other things here. -// TODO finalizer support maybe? 
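A sketch of the gating pattern IsRequiredConfigPresent supports: refuse to act until the JSON-encoded observed config carries the fields the operator depends on. The function name and required paths here are invented:

```go
package sketch

import (
	"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
)

// gateOnObservedConfig (hypothetical) returns a non-nil error while any
// required field is missing, null, or empty. Each path is a chain of field
// names into the JSON document.
func gateOnObservedConfig(observedConfig []byte) error {
	return resourcemerge.IsRequiredConfigPresent(observedConfig, [][]string{
		{"servingInfo", "bindAddress"},
		{"storageConfig", "urls"},
	})
}
```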
-func EnsureObjectMeta(modified *bool, existing *metav1.ObjectMeta, required metav1.ObjectMeta) { - SetStringIfSet(modified, &existing.Namespace, required.Namespace) - SetStringIfSet(modified, &existing.Name, required.Name) - MergeMap(modified, &existing.Labels, required.Labels) - MergeMap(modified, &existing.Annotations, required.Annotations) - MergeOwnerRefs(modified, &existing.OwnerReferences, required.OwnerReferences) -} - -// WithCleanLabelsAndAnnotations cleans the metadata off the removal annotations/labels/ownerrefs -// (those that end with trailing "-") -func WithCleanLabelsAndAnnotations(obj metav1.Object) metav1.Object { - obj.SetAnnotations(cleanRemovalKeys(obj.GetAnnotations())) - obj.SetLabels(cleanRemovalKeys(obj.GetLabels())) - obj.SetOwnerReferences(cleanRemovalOwnerRefs(obj.GetOwnerReferences())) - return obj -} - -func cleanRemovalKeys(required map[string]string) map[string]string { - for k := range required { - if strings.HasSuffix(k, "-") { - delete(required, k) - } - } - return required -} - -func stringPtr(val string) *string { - return &val -} - -func SetString(modified *bool, existing *string, required string) { - if required != *existing { - *existing = required - *modified = true - } -} - -func SetStringIfSet(modified *bool, existing *string, required string) { - if len(required) == 0 { - return - } - if required != *existing { - *existing = required - *modified = true - } -} - -func setStringPtr(modified *bool, existing **string, required *string) { - if *existing == nil || (required == nil && *existing != nil) { - *modified = true - *existing = required - return - } - SetString(modified, *existing, *required) -} - -func SetStringSlice(modified *bool, existing *[]string, required []string) { - if !reflect.DeepEqual(required, *existing) { - *existing = required - *modified = true - } -} - -func SetStringSliceIfSet(modified *bool, existing *[]string, required []string) { - if required == nil { - return - } - if !reflect.DeepEqual(required, *existing) { - *existing = required - *modified = true - } -} - -func BoolPtr(val bool) *bool { - return &val -} - -func SetBool(modified *bool, existing *bool, required bool) { - if required != *existing { - *existing = required - *modified = true - } -} - -func setBoolPtr(modified *bool, existing **bool, required *bool) { - if *existing == nil || (required == nil && *existing != nil) { - *modified = true - *existing = required - return - } - SetBool(modified, *existing, *required) -} - -func int64Ptr(val int64) *int64 { - return &val -} - -func SetInt32(modified *bool, existing *int32, required int32) { - if required != *existing { - *existing = required - *modified = true - } -} - -func SetInt32IfSet(modified *bool, existing *int32, required int32) { - if required == 0 { - return - } - - SetInt32(modified, existing, required) -} - -func SetInt64(modified *bool, existing *int64, required int64) { - if required != *existing { - *existing = required - *modified = true - } -} - -func setInt64Ptr(modified *bool, existing **int64, required *int64) { - if *existing == nil || (required == nil && *existing != nil) { - *modified = true - *existing = required - return - } - SetInt64(modified, *existing, *required) -} - -func MergeMap(modified *bool, existing *map[string]string, required map[string]string) { - if *existing == nil { - *existing = map[string]string{} - } - for k, v := range required { - actualKey := k - removeKey := false - - // if "required" map contains a key with "-" as suffix, remove that - // key from the existing map 
instead of replacing the value - if strings.HasSuffix(k, "-") { - removeKey = true - actualKey = strings.TrimRight(k, "-") - } - - if existingV, ok := (*existing)[actualKey]; removeKey { - if !ok { - continue - } - // value found -> it should be removed - delete(*existing, actualKey) - *modified = true - - } else if !ok || v != existingV { - *modified = true - (*existing)[actualKey] = v - } - } -} - -func SetMapStringString(modified *bool, existing *map[string]string, required map[string]string) { - if *existing == nil { - *existing = map[string]string{} - } - - if !reflect.DeepEqual(*existing, required) { - *existing = required - } -} - -func SetMapStringStringIfSet(modified *bool, existing *map[string]string, required map[string]string) { - if required == nil { - return - } - if *existing == nil { - *existing = map[string]string{} - } - - if !reflect.DeepEqual(*existing, required) { - *existing = required - } -} - -func MergeOwnerRefs(modified *bool, existing *[]metav1.OwnerReference, required []metav1.OwnerReference) { - if *existing == nil { - *existing = []metav1.OwnerReference{} - } - - for _, o := range required { - removeOwner := false - - // if "required" ownerRefs contain an owner.UID with "-" as suffix, remove that - // ownerRef from the existing ownerRefs instead of replacing the value - // NOTE: this is the same format as kubectl annotate and kubectl label - if strings.HasSuffix(string(o.UID), "-") { - removeOwner = true - } - - existedIndex := 0 - - for existedIndex < len(*existing) { - if ownerRefMatched(o, (*existing)[existedIndex]) { - break - } - existedIndex++ - } - - if existedIndex == len(*existing) { - // There is no matched ownerref found, append the ownerref - // if it is not to be removed. - if !removeOwner { - *existing = append(*existing, o) - *modified = true - } - continue - } - - if removeOwner { - *existing = append((*existing)[:existedIndex], (*existing)[existedIndex+1:]...) - *modified = true - continue - } - - if !reflect.DeepEqual(o, (*existing)[existedIndex]) { - (*existing)[existedIndex] = o - *modified = true - } - } -} - -func ownerRefMatched(existing, required metav1.OwnerReference) bool { - if existing.Name != required.Name { - return false - } - - if existing.Kind != required.Kind { - return false - } - - existingGV, err := schema.ParseGroupVersion(existing.APIVersion) - - if err != nil { - return false - } - - requiredGV, err := schema.ParseGroupVersion(required.APIVersion) - - if err != nil { - return false - } - - if existingGV.Group != requiredGV.Group { - return false - } - - return true -} - -func cleanRemovalOwnerRefs(required []metav1.OwnerReference) []metav1.OwnerReference { - for k := 0; k < len(required); k++ { - if strings.HasSuffix(string(required[k].UID), "-") { - required = append(required[:k], required[k+1:]...) 
- k-- - } - } - return required -} diff --git a/vendor/github.com/stolostron/cluster-lifecycle-api/action/v1beta1/register.go b/vendor/github.com/stolostron/cluster-lifecycle-api/action/v1beta1/register.go index 9760cd9fa..122f042e0 100644 --- a/vendor/github.com/stolostron/cluster-lifecycle-api/action/v1beta1/register.go +++ b/vendor/github.com/stolostron/cluster-lifecycle-api/action/v1beta1/register.go @@ -12,8 +12,8 @@ const ( ) var ( - // SchemeGroupVersion is group version used to register these objects - SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: Version} + // GroupVersion is the group version used to register these objects + GroupVersion = schema.GroupVersion{Group: GroupName, Version: Version} // schemeBuilder is used to add go types to the GroupVersionKind scheme schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) @@ -21,21 +21,25 @@ var ( // Install is a function which adds this version to a scheme Install = schemeBuilder.AddToScheme + // SchemeGroupVersion is kept because generated code relies on this name + // Deprecated: use GroupVersion instead + SchemeGroupVersion = GroupVersion + // AddToScheme adds the types in this group-version to the given scheme. AddToScheme = schemeBuilder.AddToScheme ) // Adds the list of known types to api.Scheme. func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, + scheme.AddKnownTypes(GroupVersion, &ManagedClusterAction{}, &ManagedClusterActionList{}, ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + metav1.AddToGroupVersion(scheme, GroupVersion) return nil } // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() + return GroupVersion.WithResource(resource).GroupResource() } diff --git a/vendor/github.com/stolostron/cluster-lifecycle-api/clusterinfo/v1beta1/register.go b/vendor/github.com/stolostron/cluster-lifecycle-api/clusterinfo/v1beta1/register.go index b08b882d0..a4ad9bc40 100644 --- a/vendor/github.com/stolostron/cluster-lifecycle-api/clusterinfo/v1beta1/register.go +++ b/vendor/github.com/stolostron/cluster-lifecycle-api/clusterinfo/v1beta1/register.go @@ -12,8 +12,8 @@ const ( ) var ( - // SchemeGroupVersion is group version used to register these objects - SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: Version} + // GroupVersion is the group version used to register these objects + GroupVersion = schema.GroupVersion{Group: GroupName, Version: Version} // schemeBuilder is used to add go types to the GroupVersionKind scheme schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) @@ -21,21 +21,25 @@ var ( // Install is a function which adds this version to a scheme Install = schemeBuilder.AddToScheme + // SchemeGroupVersion is kept because generated code relies on this name + // Deprecated: use GroupVersion instead + SchemeGroupVersion = GroupVersion + // AddToScheme adds the types in this group-version to the given scheme. AddToScheme = schemeBuilder.AddToScheme ) // Adds the list of known types to api.Scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, + scheme.AddKnownTypes(GroupVersion, &ManagedClusterInfo{}, &ManagedClusterInfoList{}, ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + metav1.AddToGroupVersion(scheme, GroupVersion) return nil } // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() + return GroupVersion.WithResource(resource).GroupResource() } diff --git a/vendor/github.com/stolostron/cluster-lifecycle-api/imageregistry/v1alpha1/register.go b/vendor/github.com/stolostron/cluster-lifecycle-api/imageregistry/v1alpha1/register.go index 78c96b498..e60b04a2b 100644 --- a/vendor/github.com/stolostron/cluster-lifecycle-api/imageregistry/v1alpha1/register.go +++ b/vendor/github.com/stolostron/cluster-lifecycle-api/imageregistry/v1alpha1/register.go @@ -12,8 +12,8 @@ const ( ) var ( - // SchemeGroupVersion is group version used to register these objects - SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: Version} + // GroupVersion is the group version used to register these objects + GroupVersion = schema.GroupVersion{Group: GroupName, Version: Version} // schemeBuilder is used to add go types to the GroupVersionKind scheme schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) @@ -21,21 +21,25 @@ var ( // Install is a function which adds this version to a scheme Install = schemeBuilder.AddToScheme + // SchemeGroupVersion is kept because generated code relies on this name + // Deprecated: use GroupVersion instead + SchemeGroupVersion = GroupVersion + // AddToScheme adds the types in this group-version to the given scheme. AddToScheme = schemeBuilder.AddToScheme ) // Adds the list of known types to api.Scheme. 
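The rename itself is mechanical, but callers now register these groups through GroupVersion. A minimal sketch of installing one of them into a scheme, using the clusterinfo group as the example:

```go
package sketch

import (
	clusterinfov1beta1 "github.com/stolostron/cluster-lifecycle-api/clusterinfo/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func newScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	// Registers ManagedClusterInfo{,List} under clusterinfov1beta1.GroupVersion;
	// the old SchemeGroupVersion name survives only as a deprecated alias.
	if err := clusterinfov1beta1.AddToScheme(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}
```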
func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, + scheme.AddKnownTypes(GroupVersion, &ManagedClusterImageRegistry{}, &ManagedClusterImageRegistryList{}, ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + metav1.AddToGroupVersion(scheme, GroupVersion) return nil } // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() + return GroupVersion.WithResource(resource).GroupResource() } diff --git a/vendor/github.com/stolostron/cluster-lifecycle-api/inventory/v1alpha1/doc.go b/vendor/github.com/stolostron/cluster-lifecycle-api/inventory/v1alpha1/doc.go deleted file mode 100644 index 07092a51a..000000000 --- a/vendor/github.com/stolostron/cluster-lifecycle-api/inventory/v1alpha1/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package v1alpha1 contains API Schema definitions for the inventory v1alpha1 API group -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=github.com/stolostron/cluster-lifecycle-api/inventory -// +k8s:defaulter-gen=TypeMeta -// +k8s:openapi-gen=true -// +kubebuilder:validation:Optional -// +groupName=inventory.open-cluster-management.io -package v1alpha1 diff --git a/vendor/github.com/stolostron/cluster-lifecycle-api/inventory/v1alpha1/inventory.open-cluster-management.io_baremetalassets.crd.yaml b/vendor/github.com/stolostron/cluster-lifecycle-api/inventory/v1alpha1/inventory.open-cluster-management.io_baremetalassets.crd.yaml deleted file mode 100644 index f3dde09ce..000000000 --- a/vendor/github.com/stolostron/cluster-lifecycle-api/inventory/v1alpha1/inventory.open-cluster-management.io_baremetalassets.crd.yaml +++ /dev/null @@ -1,150 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.0 - creationTimestamp: null - name: baremetalassets.inventory.open-cluster-management.io -spec: - group: inventory.open-cluster-management.io - names: - kind: BareMetalAsset - listKind: BareMetalAssetList - plural: baremetalassets - singular: baremetalasset - scope: Namespaced - preserveUnknownFields: false - versions: - - name: v1alpha1 - deprecated: true - deprecationWarning: "inventory.open-cluster-management.io/v1alpha1 BareMetalAsset is deprecated" - schema: - openAPIV3Schema: - description: BareMetalAsset is the Schema for the baremetalassets API - type: object - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: BareMetalAssetSpec defines the desired state of BareMetalAsset - type: object - properties: - bmc: - description: How do we connect to the BMC? - type: object - properties: - address: - description: Address holds the URL for accessing the controller on the network. 
- type: string - credentialsName: - description: The name of the secret containing the BMC credentials (requires keys "username" and "password"). - type: string - bootMACAddress: - description: Which MAC address will PXE boot? This is optional for some types, but required for libvirt VMs driven by vbmc. - type: string - pattern: '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}' - clusterDeployment: - description: ClusterDeployment which the asset belongs to. - type: object - x-kubernetes-preserve-unknown-fields: true - hardwareProfile: - description: What is the name of the hardware profile for this host? It should only be necessary to set this when inspection cannot automatically determine the profile. - type: string - role: - description: Role holds the role of the asset - type: string - enum: - - master - - worker - status: - description: BareMetalAssetStatus defines the observed state of BareMetalAsset - type: object - properties: - conditions: - description: Conditions describes the state of the BareMetalAsset resource. - type: array - items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - type: object - required: - - lastTransitionTime - - message - - reason - - status - - type - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - type: string - format: date-time - message: - description: message is a human readable message indicating details about the transition. This may be an empty string. - type: string - maxLength: 32768 - observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. - type: integer - format: int64 - minimum: 0 - reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. - type: string - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - status: - description: status of the condition, one of True, False, Unknown. - type: string - enum: - - "True" - - "False" - - Unknown - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - type: string - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - relatedObjects: - description: RelatedObjects is a list of objects created and maintained by this operator. Object references will be added to this list after they have been created AND found in the cluster. - type: array - items: - description: 'ObjectReference contains enough information to let you inspect or modify the referred object. --- New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". Those cannot be well described when embedded. 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple and the version of the actual struct is irrelevant. 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type will affect numerous schemas. Don''t make new APIs embed an underspecified API type they do not control. Instead of using this type, create a locally provided and used type that is well-focused on your reference. For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .' - type: object - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' - type: string - kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' - type: string - resourceVersion: - description: 'Specific resourceVersion to which this reference is made, if any. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' - type: string - uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' - type: string - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/vendor/github.com/stolostron/cluster-lifecycle-api/inventory/v1alpha1/register.go b/vendor/github.com/stolostron/cluster-lifecycle-api/inventory/v1alpha1/register.go deleted file mode 100644 index 23e4ebaf4..000000000 --- a/vendor/github.com/stolostron/cluster-lifecycle-api/inventory/v1alpha1/register.go +++ /dev/null @@ -1,41 +0,0 @@ -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -const ( - GroupName = "inventory.open-cluster-management.io" - Version = "v1alpha1" -) - -var ( - // SchemeGroupVersion is group version used to register these objects - SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: Version} - - // schemeBuilder is used to add go types to the GroupVersionKind scheme - schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - - // Install is a function which adds this version to a scheme - Install = schemeBuilder.AddToScheme - - // AddToScheme adds the types in this group-version to the given scheme. - AddToScheme = schemeBuilder.AddToScheme -) - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &BareMetalAsset{}, - &BareMetalAssetList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} diff --git a/vendor/github.com/stolostron/cluster-lifecycle-api/inventory/v1alpha1/types.go b/vendor/github.com/stolostron/cluster-lifecycle-api/inventory/v1alpha1/types.go deleted file mode 100644 index 280b68960..000000000 --- a/vendor/github.com/stolostron/cluster-lifecycle-api/inventory/v1alpha1/types.go +++ /dev/null @@ -1,140 +0,0 @@ -package v1alpha1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// ManagedClusterResourceNamespace is the namespace on the managed cluster where BareMetalHosts are placed. -const ManagedClusterResourceNamespace string = "openshift-machine-api" - -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. - -// BMCDetails contains the information necessary to communicate with -// the bare metal controller module on host. -type BMCDetails struct { - - // Address holds the URL for accessing the controller on the - // network. - Address string `json:"address"` - - // The name of the secret containing the BMC credentials (requires - // keys "username" and "password"). 
- CredentialsName string `json:"credentialsName"` -} - -// Role represents the role assigned to the asset -type Role string - -const ( - // MasterRole is the master role assigned to the asset - MasterRole Role = "master" - - // WorkerRole is the worker role assigned to the asset - WorkerRole Role = "worker" -) - -// BareMetalAssetSpec defines the desired state of BareMetalAsset -type BareMetalAssetSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file - // Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html - - // How do we connect to the BMC? - BMC BMCDetails `json:"bmc,omitempty"` - - // What is the name of the hardware profile for this host? It - // should only be necessary to set this when inspection cannot - // automatically determine the profile. - HardwareProfile string `json:"hardwareProfile,omitempty"` - - // Which MAC address will PXE boot? This is optional for some - // types, but required for libvirt VMs driven by vbmc. - // +kubebuilder:validation:Pattern=`[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}` - BootMACAddress string `json:"bootMACAddress,omitempty"` - - // Role holds the role of the asset - // +kubebuilder:validation:Enum=master;worker - Role Role `json:"role,omitempty"` - - // ClusterDeployment which the asset belongs to. - // +kubebuilder:pruning:PreserveUnknownFields - ClusterDeployment metav1.ObjectMeta `json:"clusterDeployment,omitempty"` -} - -// BareMetalAssetStatus defines the observed state of BareMetalAsset -type BareMetalAssetStatus struct { - // Conditions describes the state of the BareMetalAsset resource. - // +patchMergeKey=type - // +patchStrategy=merge - // +optional - Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` - - // RelatedObjects is a list of objects created and maintained by this - // operator. Object references will be added to this list after they have - // been created AND found in the cluster. - // +optional - RelatedObjects []corev1.ObjectReference `json:"relatedObjects,omitempty"` -} - -// Condition Types -const ( - // ConditionCredentialsFound reports whether the secret containing the credentials - // of a BareMetalAsset have been found. - ConditionCredentialsFound string = "CredentialsFound" - - // ConditionAssetSyncStarted reports whether synchronization of a BareMetalHost - // to a managed cluster has started - ConditionAssetSyncStarted string = "AssetSyncStarted" - - // ConditionClusterDeploymentFound reports whether the cluster deployment referenced in - // a BareMetalAsset has been found. 
- ConditionClusterDeploymentFound string = "ClusterDeploymentFound" - - // ConditionAssetSyncCompleted reports whether synchronization of a BareMetalHost - // to a managed cluster has completed - ConditionAssetSyncCompleted string = "AssetSyncCompleted" -) - -// Condition Reasons -const ( - ConditionReasonSecretNotFound string = "SecretNotFound" - ConditionReasonSecretFound string = "SecretFound" - ConditionReasonNoneSpecified string = "NoneSpecified" - ConditionReasonClusterDeploymentNotFound string = "ClusterDeploymentNotFound" - ConditionReasonClusterDeploymentFound string = "ClusterDeploymentFound" - ConditionReasonSyncSetCreationFailed string = "SyncSetCreationFailed" - ConditionReasonSyncSetCreated string = "SyncSetCreated" - ConditionReasonSyncSetGetFailed string = "SyncSetGetFailed" - ConditionReasonSyncSetUpdateFailed string = "SyncSetUpdateFailed" - ConditionReasonSyncSetUpdated string = "SyncSetUpdated" - ConditionReasonSyncStatusNotFound string = "SyncStatusNotFound" - ConditionReasonSyncSetNotApplied string = "SyncSetNotApplied" - ConditionReasonSyncSetAppliedSuccessful string = "SyncSetAppliedSuccessful" - ConditionReasonSyncSetAppliedFailed string = "SyncSetAppliedFailed" - ConditionReasonUnexpectedResourceCount string = "UnexpectedResourceCount" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// BareMetalAsset is the Schema for the baremetalassets API -// +kubebuilder:subresource:status -// +kubebuilder:resource:path=baremetalassets,scope=Namespaced -type BareMetalAsset struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec BareMetalAssetSpec `json:"spec,omitempty"` - Status BareMetalAssetStatus `json:"status,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// BareMetalAssetList contains a list of BareMetalAsset -type BareMetalAssetList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []BareMetalAsset `json:"items"` -} diff --git a/vendor/github.com/stolostron/cluster-lifecycle-api/inventory/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/stolostron/cluster-lifecycle-api/inventory/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index a353d6f11..000000000 --- a/vendor/github.com/stolostron/cluster-lifecycle-api/inventory/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,135 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BMCDetails) DeepCopyInto(out *BMCDetails) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BMCDetails. -func (in *BMCDetails) DeepCopy() *BMCDetails { - if in == nil { - return nil - } - out := new(BMCDetails) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *BareMetalAsset) DeepCopyInto(out *BareMetalAsset) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalAsset. -func (in *BareMetalAsset) DeepCopy() *BareMetalAsset { - if in == nil { - return nil - } - out := new(BareMetalAsset) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *BareMetalAsset) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BareMetalAssetList) DeepCopyInto(out *BareMetalAssetList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]BareMetalAsset, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalAssetList. -func (in *BareMetalAssetList) DeepCopy() *BareMetalAssetList { - if in == nil { - return nil - } - out := new(BareMetalAssetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *BareMetalAssetList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BareMetalAssetSpec) DeepCopyInto(out *BareMetalAssetSpec) { - *out = *in - out.BMC = in.BMC - in.ClusterDeployment.DeepCopyInto(&out.ClusterDeployment) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalAssetSpec. -func (in *BareMetalAssetSpec) DeepCopy() *BareMetalAssetSpec { - if in == nil { - return nil - } - out := new(BareMetalAssetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BareMetalAssetStatus) DeepCopyInto(out *BareMetalAssetStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.RelatedObjects != nil { - in, out := &in.RelatedObjects, &out.RelatedObjects - *out = make([]corev1.ObjectReference, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalAssetStatus. 
-func (in *BareMetalAssetStatus) DeepCopy() *BareMetalAssetStatus { - if in == nil { - return nil - } - out := new(BareMetalAssetStatus) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/stolostron/cluster-lifecycle-api/inventory/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/stolostron/cluster-lifecycle-api/inventory/v1alpha1/zz_generated.swagger_doc_generated.go deleted file mode 100644 index 8a3893d78..000000000 --- a/vendor/github.com/stolostron/cluster-lifecycle-api/inventory/v1alpha1/zz_generated.swagger_doc_generated.go +++ /dev/null @@ -1,63 +0,0 @@ -package v1alpha1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_BMCDetails = map[string]string{ - "": "BMCDetails contains the information necessary to communicate with the bare metal controller module on host.", - "address": "Address holds the URL for accessing the controller on the network.", - "credentialsName": "The name of the secret containing the BMC credentials (requires keys \"username\" and \"password\").", -} - -func (BMCDetails) SwaggerDoc() map[string]string { - return map_BMCDetails -} - -var map_BareMetalAsset = map[string]string{ - "": "BareMetalAsset is the Schema for the baremetalassets API", -} - -func (BareMetalAsset) SwaggerDoc() map[string]string { - return map_BareMetalAsset -} - -var map_BareMetalAssetList = map[string]string{ - "": "BareMetalAssetList contains a list of BareMetalAsset", -} - -func (BareMetalAssetList) SwaggerDoc() map[string]string { - return map_BareMetalAssetList -} - -var map_BareMetalAssetSpec = map[string]string{ - "": "BareMetalAssetSpec defines the desired state of BareMetalAsset", - "bmc": "How do we connect to the BMC?", - "hardwareProfile": "What is the name of the hardware profile for this host? It should only be necessary to set this when inspection cannot automatically determine the profile.", - "bootMACAddress": "Which MAC address will PXE boot? This is optional for some types, but required for libvirt VMs driven by vbmc.", - "role": "Role holds the role of the asset", - "clusterDeployment": "ClusterDeployment which the asset belongs to.", -} - -func (BareMetalAssetSpec) SwaggerDoc() map[string]string { - return map_BareMetalAssetSpec -} - -var map_BareMetalAssetStatus = map[string]string{ - "": "BareMetalAssetStatus defines the observed state of BareMetalAsset", - "conditions": "Conditions describes the state of the BareMetalAsset resource.", - "relatedObjects": "RelatedObjects is a list of objects created and maintained by this operator. 
Object references will be added to this list after they have been created AND found in the cluster.", -} - -func (BareMetalAssetStatus) SwaggerDoc() map[string]string { - return map_BareMetalAssetStatus -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/stolostron/cluster-lifecycle-api/view/v1beta1/register.go b/vendor/github.com/stolostron/cluster-lifecycle-api/view/v1beta1/register.go index 63fefdae8..71e17e6aa 100644 --- a/vendor/github.com/stolostron/cluster-lifecycle-api/view/v1beta1/register.go +++ b/vendor/github.com/stolostron/cluster-lifecycle-api/view/v1beta1/register.go @@ -12,8 +12,8 @@ const ( ) var ( - // SchemeGroupVersion is group version used to register these objects - SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: Version} + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: GroupName, Version: Version} // schemeBuilder is used to add go types to the GroupVersionKind scheme schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) @@ -21,21 +21,25 @@ var ( // Install is a function which adds this version to a scheme Install = schemeBuilder.AddToScheme + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme adds the types in this group-version to the given scheme. AddToScheme = schemeBuilder.AddToScheme ) // Adds the list of known types to api.Scheme. func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, + scheme.AddKnownTypes(GroupVersion, &ManagedClusterView{}, &ManagedClusterViewList{}, ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + metav1.AddToGroupVersion(scheme, GroupVersion) return nil } // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() + return GroupVersion.WithResource(resource).GroupResource() } diff --git a/vendor/modules.txt b/vendor/modules.txt index 6bbe9292f..aae92bbc1 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -227,12 +227,6 @@ github.com/mattbaird/jsonpatch # github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 ## explicit; go 1.9 github.com/matttproud/golang_protobuf_extensions/pbutil -# github.com/metal3-io/baremetal-operator/apis v0.0.0 => github.com/openshift/baremetal-operator/apis v0.0.0-20211201170610-92ffa60c683d -## explicit; go 1.16 -github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1 -# github.com/metal3-io/baremetal-operator/pkg/hardwareutils v0.0.0 => github.com/openshift/baremetal-operator/pkg/hardwareutils v0.0.0-20211201170610-92ffa60c683d -## explicit; go 1.16 -github.com/metal3-io/baremetal-operator/pkg/hardwareutils/bmc # github.com/mitchellh/copystructure v1.2.0 ## explicit; go 1.15 github.com/mitchellh/copystructure @@ -299,7 +293,6 @@ github.com/onsi/gomega/types ## explicit; go 1.16 github.com/openshift/api/config/v1 github.com/openshift/api/oauth/v1 -github.com/openshift/api/operator/v1 github.com/openshift/api/route/v1 # github.com/openshift/build-machinery-go v0.0.0-20220429084610-baff9f8d23b3 ## explicit; go 1.13 @@ -328,9 +321,6 @@ github.com/openshift/client-go/route/clientset/versioned/fake github.com/openshift/client-go/route/clientset/versioned/scheme github.com/openshift/client-go/route/clientset/versioned/typed/route/v1 
github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/fake -# github.com/openshift/custom-resource-status v1.1.2 -## explicit; go 1.12 -github.com/openshift/custom-resource-status/objectreferences/v1 # github.com/openshift/hive v1.1.17-0.20220726120844-e78dfd39116d ## explicit; go 1.18 github.com/openshift/hive/pkg/client/clientset/versioned @@ -361,7 +351,6 @@ github.com/openshift/hive/apis/scheme ## explicit; go 1.17 github.com/openshift/library-go/pkg/authorization/authorizationutil github.com/openshift/library-go/pkg/crypto -github.com/openshift/library-go/pkg/operator/resource/resourcemerge # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors @@ -414,13 +403,12 @@ github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/stolostron/cluster-lifecycle-api v0.0.0-20220930080346-456dd8fcbea8 +# github.com/stolostron/cluster-lifecycle-api v0.0.0-20221107031926-6f0a02d2aaf5 ## explicit; go 1.18 github.com/stolostron/cluster-lifecycle-api/action/v1beta1 github.com/stolostron/cluster-lifecycle-api/clusterinfo/v1beta1 github.com/stolostron/cluster-lifecycle-api/helpers/imageregistry github.com/stolostron/cluster-lifecycle-api/imageregistry/v1alpha1 -github.com/stolostron/cluster-lifecycle-api/inventory/v1alpha1 github.com/stolostron/cluster-lifecycle-api/view/v1beta1 # github.com/stretchr/testify v1.7.2 ## explicit; go 1.13
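Note on the deleted CRD above: its status.conditions schema is the standard metav1.Condition shape (type, status, reason, message, lastTransitionTime, observedGeneration), which is where the CamelCase pattern on reason and the qualified-name pattern on type come from. A minimal sketch of how a controller populates such a condition with the apimachinery helper — the condition values here are illustrative, not a remaining API:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	var conditions []metav1.Condition

	// SetStatusCondition enforces the metav1.Condition contract: it
	// de-duplicates by Type and stamps LastTransitionTime when the
	// status actually changes.
	meta.SetStatusCondition(&conditions, metav1.Condition{
		Type:    "CredentialsFound", // must match the qualified-name pattern
		Status:  metav1.ConditionTrue,
		Reason:  "SecretFound", // CamelCase, per the reason pattern
		Message: "BMC credentials secret exists",
	})

	fmt.Println(conditions[0].Type, conditions[0].Status)
}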
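The deleted inventory/v1alpha1/register.go is the standard scheme-registration boilerplate; with it gone, nothing registers the BareMetalAsset kinds into a runtime.Scheme any longer, so the API server types simply cease to exist for this controller. A self-contained sketch of the pattern, using an illustrative group name rather than any remaining API:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// gv stands in for an API group-version; the group name is illustrative only.
var gv = schema.GroupVersion{Group: "example.open-cluster-management.io", Version: "v1alpha1"}

// addKnownTypes mirrors the shape of the deleted register.go: a real package
// would also call scheme.AddKnownTypes(gv, &Kind{}, &KindList{}) here.
func addKnownTypes(scheme *runtime.Scheme) error {
	metav1.AddToGroupVersion(scheme, gv)
	return nil
}

func main() {
	scheme := runtime.NewScheme()
	if err := runtime.NewSchemeBuilder(addKnownTypes).AddToScheme(scheme); err != nil {
		panic(err)
	}
	fmt.Println(scheme.IsVersionRegistered(gv)) // true once registered
}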
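The deleted zz_generated.deepcopy.go is deepcopy-gen output. The contract those methods provide is that reference fields (slices, maps, nested structs) are copied element by element, so callers can mutate a copy without corrupting objects shared through informer caches. A hand-written sketch of the same contract on a hypothetical type:

package main

import "fmt"

// Widget is a hypothetical type standing in for any API object that carries
// deepcopy-gen output.
type Widget struct {
	Labels map[string]string
}

// DeepCopy mirrors what deepcopy-gen emits: the map is re-allocated and
// copied key by key, so the copy shares no memory with the original.
func (in *Widget) DeepCopy() *Widget {
	if in == nil {
		return nil
	}
	out := new(Widget)
	if in.Labels != nil {
		out.Labels = make(map[string]string, len(in.Labels))
		for k, v := range in.Labels {
			out.Labels[k] = v
		}
	}
	return out
}

func main() {
	a := &Widget{Labels: map[string]string{"role": "worker"}}
	b := a.DeepCopy()
	b.Labels["role"] = "master"
	fmt.Println(a.Labels["role"]) // still "worker": the copy shares no state
}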
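The view/v1beta1/register.go hunk is a rename with a compatibility alias: GroupVersion becomes the primary name, while SchemeGroupVersion remains as a deprecated alias so previously generated clients keep compiling. The pattern in isolation, with an illustrative group name:

package v1beta1

import "k8s.io/apimachinery/pkg/runtime/schema"

var (
	// GroupVersion is the preferred name going forward.
	GroupVersion = schema.GroupVersion{Group: "example.open-cluster-management.io", Version: "v1beta1"}

	// SchemeGroupVersion is kept because previously generated code still
	// references the old name; both names refer to the same value.
	// Deprecated: use GroupVersion instead.
	SchemeGroupVersion = GroupVersion
)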
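The vendor/modules.txt hunks fall out mechanically: modules.txt is maintained by the go tool, not by hand, so once go.mod pins the newer cluster-lifecycle-api revision and the inventory import is dropped, the metal3-io, custom-resource-status, operator/v1, and resourcemerge entries disappear because nothing imports them any longer. Regenerating the vendor tree is the usual two commands:

go mod tidy
go mod vendor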