diff --git a/addon/v1alpha1/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml b/addon/v1alpha1/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml
index 75178916b..e08074e70 100644
--- a/addon/v1alpha1/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml
+++ b/addon/v1alpha1/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml
@@ -160,8 +160,10 @@ spec:
                               and ProgressivePerGroup. For Progressive, this
                               is considered over the total number of clusters.
                               For ProgressivePerGroup, this is considered according
-                              to the size of the current group. Default is that
-                              no failures are tolerated.
+                              to the size of the current group. For both Progressive
+                              and ProgressivePerGroup, the MaxFailures does
+                              not apply for MandatoryDecisionGroups, which tolerate
+                              no failures. Default is that no failures are tolerated.
                             pattern: ^((100|[0-9]{1,2})%|[0-9]+)$
                             x-kubernetes-int-or-string: true
                           minSuccessTime:
@@ -255,8 +257,10 @@ spec:
                               and ProgressivePerGroup. For Progressive, this
                               is considered over the total number of clusters.
                               For ProgressivePerGroup, this is considered according
-                              to the size of the current group. Default is that
-                              no failures are tolerated.
+                              to the size of the current group. For both Progressive
+                              and ProgressivePerGroup, the MaxFailures does
+                              not apply for MandatoryDecisionGroups, which tolerate
+                              no failures. Default is that no failures are tolerated.
                             pattern: ^((100|[0-9]{1,2})%|[0-9]+)$
                             x-kubernetes-int-or-string: true
                           minSuccessTime:
@@ -339,8 +343,10 @@ spec:
                               and ProgressivePerGroup. For Progressive, this
                               is considered over the total number of clusters.
                               For ProgressivePerGroup, this is considered according
-                              to the size of the current group. Default is that
-                              no failures are tolerated.
+                              to the size of the current group. For both Progressive
+                              and ProgressivePerGroup, the MaxFailures does
+                              not apply for MandatoryDecisionGroups, which tolerate
+                              no failures. Default is that no failures are tolerated.
                             pattern: ^((100|[0-9]{1,2})%|[0-9]+)$
                             x-kubernetes-int-or-string: true
                           minSuccessTime:
@@ -385,20 +391,6 @@ spec:
                             type: object
                         type:
                           default: All
-                          description: Rollout strategy Types are All, Progressive
-                            and ProgressivePerGroup 1) All means apply the workload
-                            to all clusters in the decision groups at once. 2)
-                            Progressive means apply the workload to the selected
-                            clusters progressively per cluster. The workload will
-                            not be applied to the next cluster unless one of the
-                            current applied clusters reach the successful state
-                            and haven't breached the MaxFailures configuration.
-                            3) ProgressivePerGroup means apply the workload to
-                            decisionGroup clusters progressively per group. The
-                            workload will not be applied to the next decisionGroup
-                            unless all clusters in the current group reach the
-                            successful state and haven't breached the MaxFailures
-                            configuration.
                           enum:
                           - All
                           - Progressive
diff --git a/cluster/v1alpha1/helpers.go b/cluster/v1alpha1/helpers.go
index 6e1c17928..8aaa6ab31 100644
--- a/cluster/v1alpha1/helpers.go
+++ b/cluster/v1alpha1/helpers.go
@@ -162,10 +162,10 @@ func (r *RolloutHandler[T]) getProgressiveClusters(rolloutStrategy RolloutStrate
 	groupKeys := decisionGroupsToGroupKeys(strategy.Progressive.MandatoryDecisionGroups.MandatoryDecisionGroups)
 	clusterGroups = r.pdTracker.ExistingClusterGroups(groupKeys...)
 
-	// Perform progressive rollOut for mandatory decision groups first.
+	// Perform progressive rollOut for mandatory decision groups first, tolerating no failures
 	if len(clusterGroups) > 0 {
 		rolloutResult := progressivePerGroup(
-			clusterGroups, intstr.FromInt(maxFailures), minSuccessTime, failureTimeout, currentClusterStatus, true,
+			clusterGroups, intstr.FromInt(0), minSuccessTime, failureTimeout, currentClusterStatus,
 		)
 		if len(rolloutResult.ClustersToRollout) > 0 || len(rolloutResult.ClustersTimeOut) > 0 {
 			rolloutResult.ClustersRemoved = removedClusterStatus
@@ -219,9 +219,9 @@ func (r *RolloutHandler[T]) getProgressivePerGroupClusters(rolloutStrategy Rollo
 	groupKeys := decisionGroupsToGroupKeys(mandatoryDecisionGroups)
 	clusterGroups = r.pdTracker.ExistingClusterGroups(groupKeys...)
 
-	// Perform progressive rollout per group for mandatory decision groups first
+	// Perform progressive rollout per group for mandatory decision groups first, tolerating no failures
 	if len(clusterGroups) > 0 {
-		rolloutResult := progressivePerGroup(clusterGroups, maxFailures, minSuccessTime, failureTimeout, currentClusterStatus, false)
+		rolloutResult := progressivePerGroup(clusterGroups, intstr.FromInt(0), minSuccessTime, failureTimeout, currentClusterStatus)
 
 		if len(rolloutResult.ClustersToRollout) > 0 || len(rolloutResult.ClustersTimeOut) > 0 {
 			rolloutResult.ClustersRemoved = removedClusterStatus
@@ -233,7 +233,7 @@ func (r *RolloutHandler[T]) getProgressivePerGroupClusters(rolloutStrategy Rollo
 	restClusterGroups := r.pdTracker.ExistingClusterGroupsBesides(groupKeys...)
 
 	// Perform progressive rollout per group for the remaining decision groups
-	rolloutResult := progressivePerGroup(restClusterGroups, maxFailures, minSuccessTime, failureTimeout, currentClusterStatus, false)
+	rolloutResult := progressivePerGroup(restClusterGroups, maxFailures, minSuccessTime, failureTimeout, currentClusterStatus)
 	rolloutResult.ClustersRemoved = removedClusterStatus
 
 	return &strategy, rolloutResult, nil
@@ -291,7 +291,7 @@ func progressivePerCluster(
 
 		// If there was a breach of MaxFailures, only handle clusters that have already had workload applied
 		if !failureBreach || failureBreach && status.Status != ToApply {
-			rolloutClusters, timeoutClusters = determineRolloutStatus(status, minSuccessTime, timeout, rolloutClusters, timeoutClusters)
+			rolloutClusters, timeoutClusters = determineRolloutStatus(&status, minSuccessTime, timeout, rolloutClusters, timeoutClusters)
 		}
 
 		// Keep track of TimeOut or Failed clusters and check total against MaxFailures
@@ -360,44 +360,43 @@ func progressivePerGroup(
 	minSuccessTime time.Duration,
 	timeout time.Duration,
 	existingClusterStatus []ClusterRolloutStatus,
-	accumulateFailures bool,
 ) RolloutResult {
 	var rolloutClusters, timeoutClusters []ClusterRolloutStatus
-	existingClusters := make(map[string]bool)
+	existingClusters := make(map[string]RolloutStatus)
 
 	for _, status := range existingClusterStatus {
 		if status.ClusterName == "" {
 			continue
 		}
 
-		if status.Status == ToApply {
-			// Set as false to consider the cluster in the decisionGroups iteration.
-			existingClusters[status.ClusterName] = false
-		} else {
-			existingClusters[status.ClusterName] = true
-			rolloutClusters, timeoutClusters = determineRolloutStatus(status, minSuccessTime, timeout, rolloutClusters, timeoutClusters)
+		// ToApply will be reconsidered in the decisionGroups iteration.
+		if status.Status != ToApply {
+			rolloutClusters, timeoutClusters = determineRolloutStatus(&status, minSuccessTime, timeout, rolloutClusters, timeoutClusters)
+			existingClusters[status.ClusterName] = status.Status
 		}
 	}
 
-	var failureCount int
+	totalFailureCount := 0
 	failureBreach := false
 	clusterGroupKeys := clusterGroupsMap.GetOrderedGroupKeys()
 	for _, key := range clusterGroupKeys {
+		groupFailureCount := 0
 		if subclusters, ok := clusterGroupsMap[key]; ok {
-			// Only reset the failure count for ProgressivePerGroup
-			// since Progressive is over the total number of clusters
-			if !accumulateFailures {
-				failureCount = 0
-			}
-			failureBreach = false
 			// Calculate the max failure threshold for the group--the returned error was checked
 			// previously, so it's ignored here
-			maxGroupFailures, _ := calculateRolloutSize(maxFailures, subclusters.Len(), 0)
+			maxGroupFailures, _ := calculateRolloutSize(maxFailures, len(subclusters), 0)
 
 			// Iterate through clusters in the group
 			clusters := subclusters.UnsortedList()
 			sort.Strings(clusters)
 			for _, cluster := range clusters {
-				if existingClusters[cluster] {
+				if status, ok := existingClusters[cluster]; ok {
+					// Keep track of TimeOut or Failed clusters and check total against MaxFailures
+					if status == TimeOut || status == Failed {
+						groupFailureCount++
+
+						failureBreach = groupFailureCount > maxGroupFailures
+					}
+
 					continue
 				}
@@ -407,18 +406,13 @@ func progressivePerGroup(
 				GroupKey:    key,
 			}
 			rolloutClusters = append(rolloutClusters, status)
-
-			// Keep track of TimeOut or Failed clusters and check total against MaxFailures
-			if status.Status == TimeOut || status.Status == Failed {
-				failureCount++
-
-				failureBreach = failureCount > maxGroupFailures
-			}
 		}
 
-		// As it is perGroup Return if there are clusters to rollOut,
-		// or there was a breach of the MaxFailure configuration
-		if len(rolloutClusters) > maxGroupFailures || failureBreach {
+		totalFailureCount += groupFailureCount
+
+		// As it is perGroup, return if there are clusters to rollOut that aren't
+		// Failed/Timeout, or there was a breach of the MaxFailure configuration
+		if len(rolloutClusters)-totalFailureCount > 0 || failureBreach {
 			return RolloutResult{
 				ClustersToRollout: rolloutClusters,
 				ClustersTimeOut:   timeoutClusters,
@@ -448,7 +442,7 @@
 // the rollOut Clusters.
 // 2. If timeout is set to 0, the function append the clusterStatus to the timeOut clusters.
 func determineRolloutStatus(
-	status ClusterRolloutStatus,
+	status *ClusterRolloutStatus,
 	minSuccessTime time.Duration,
 	timeout time.Duration,
 	rolloutClusters []ClusterRolloutStatus,
@@ -457,13 +451,13 @@ func determineRolloutStatus(
 
 	switch status.Status {
 	case ToApply:
-		rolloutClusters = append(rolloutClusters, status)
+		rolloutClusters = append(rolloutClusters, *status)
 	case Succeeded:
 		// If the cluster succeeded but is still within the MinSuccessTime (i.e. "soak" time),
 		// still add it to the list of rolloutClusters
 		minSuccessTimeTime := getTimeOutTime(status.LastTransitionTime, minSuccessTime)
 		if RolloutClock.Now().Before(minSuccessTimeTime.Time) {
-			rolloutClusters = append(rolloutClusters, status)
+			rolloutClusters = append(rolloutClusters, *status)
 		}
 
 		return rolloutClusters, timeoutClusters
@@ -474,10 +468,10 @@
 		status.TimeOutTime = timeOutTime
 		// check if current time is before the timeout time
 		if RolloutClock.Now().Before(timeOutTime.Time) {
-			rolloutClusters = append(rolloutClusters, status)
+			rolloutClusters = append(rolloutClusters, *status)
 		} else {
 			status.Status = TimeOut
-			timeoutClusters = append(timeoutClusters, status)
+			timeoutClusters = append(timeoutClusters, *status)
 		}
 	}
 
diff --git a/cluster/v1alpha1/helpers_test.go b/cluster/v1alpha1/helpers_test.go
index 91865e6f9..f8403105a 100644
--- a/cluster/v1alpha1/helpers_test.go
+++ b/cluster/v1alpha1/helpers_test.go
@@ -592,11 +592,11 @@ func TestGetRolloutCluster_Progressive(t *testing.T) {
 			},
 		},
 		{
-			name: "test progressive rollout with timeout 0s",
+			name: "test progressive rollout with timeout 0s, maxFailures 3",
 			rolloutStrategy: RolloutStrategy{
 				Type: Progressive,
 				Progressive: &RolloutProgressive{
-					RolloutConfig:  RolloutConfig{ProgressDeadline: "0s"},
+					RolloutConfig:  RolloutConfig{ProgressDeadline: "0s", MaxFailures: intstr.FromInt(3)},
 					MaxConcurrency: intstr.FromInt(2),
 				},
 			},
@@ -608,7 +608,7 @@ func TestGetRolloutCluster_Progressive(t *testing.T) {
 			expectRolloutStrategy: &RolloutStrategy{
 				Type: Progressive,
 				Progressive: &RolloutProgressive{
-					RolloutConfig:  RolloutConfig{ProgressDeadline: "0s"},
+					RolloutConfig:  RolloutConfig{ProgressDeadline: "0s", MaxFailures: intstr.FromInt(3)},
 					MaxConcurrency: intstr.FromInt(2),
 				},
 			},
@@ -644,7 +644,7 @@ func TestGetRolloutCluster_Progressive(t *testing.T) {
 			},
 		},
 		{
-			name: "test progressive rollout with mandatoryDecisionGroup and timeout 90s ",
+			name: "test progressive rollout with mandatoryDecisionGroup and timeout 90s",
 			rolloutStrategy: RolloutStrategy{
 				Type: Progressive,
 				Progressive: &RolloutProgressive{
@@ -701,6 +701,7 @@ func TestGetRolloutCluster_Progressive(t *testing.T) {
 				ClustersTimeOut: []ClusterRolloutStatus{
 					{ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: TimeOut, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTime_30s},
 				},
+				MaxFailureBreach: true,
 			},
 		},
 		{
@@ -854,6 +855,7 @@ func TestGetRolloutCluster_Progressive(t *testing.T) {
 				ClustersTimeOut: []ClusterRolloutStatus{
 					{ClusterName: "cluster1", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: TimeOut, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTime_30s},
 				},
+				MaxFailureBreach: true,
 			},
 		},
 		{
@@ -1086,14 +1088,15 @@ func TestGetRolloutCluster_ProgressivePerGroup(t *testing.T) {
 				ClustersTimeOut: []ClusterRolloutStatus{
 					{ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: TimeOut, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTime_30s},
 				},
+				MaxFailureBreach: true,
 			},
 		},
 		{
-			name: "test progressivePerGroup rollout with timeout 90s and first group timeOut",
+			name: "test progressivePerGroup rollout with timeout 90s and first group timeOut, maxFailures 2",
 			rolloutStrategy: RolloutStrategy{
 				Type: ProgressivePerGroup,
 				ProgressivePerGroup: &RolloutProgressivePerGroup{
-					RolloutConfig: RolloutConfig{ProgressDeadline: "90s"},
+					RolloutConfig: RolloutConfig{ProgressDeadline: "90s", MaxFailures: intstr.FromInt(2)},
 				},
 			},
 			existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{
@@ -1105,7 +1108,7 @@ func TestGetRolloutCluster_ProgressivePerGroup(t *testing.T) {
 			expectRolloutStrategy: &RolloutStrategy{
 				Type: ProgressivePerGroup,
 				ProgressivePerGroup: &RolloutProgressivePerGroup{
-					RolloutConfig: RolloutConfig{ProgressDeadline: "90s"},
+					RolloutConfig: RolloutConfig{ProgressDeadline: "90s", MaxFailures: intstr.FromInt(2)},
 				},
 			},
 			existingWorkloads: []dummyWorkload{
@@ -1144,7 +1147,7 @@ func TestGetRolloutCluster_ProgressivePerGroup(t *testing.T) {
 			rolloutStrategy: RolloutStrategy{
 				Type: ProgressivePerGroup,
 				ProgressivePerGroup: &RolloutProgressivePerGroup{
-					RolloutConfig: RolloutConfig{ProgressDeadline: "90s"},
+					RolloutConfig: RolloutConfig{ProgressDeadline: "90s", MaxFailures: intstr.FromInt(2)},
 				},
 			},
 			existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{
@@ -1156,7 +1159,7 @@ func TestGetRolloutCluster_ProgressivePerGroup(t *testing.T) {
 			expectRolloutStrategy: &RolloutStrategy{
 				Type: ProgressivePerGroup,
 				ProgressivePerGroup: &RolloutProgressivePerGroup{
-					RolloutConfig: RolloutConfig{ProgressDeadline: "90s"},
+					RolloutConfig: RolloutConfig{ProgressDeadline: "90s", MaxFailures: intstr.FromInt(2)},
 				},
 			},
 			existingWorkloads: []dummyWorkload{
@@ -1252,14 +1255,15 @@ func TestGetRolloutCluster_ProgressivePerGroup(t *testing.T) {
 				ClustersToRollout: []ClusterRolloutStatus{
 					{ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Failed, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTimeMax_120s},
 				},
+				MaxFailureBreach: true,
 			},
 		},
 		{
-			name: "test progressivePerGroup rollout with timeout None, first group 1 cluster is failing and maxFailures is 1",
+			name: "test progressivePerGroup rollout with timeout None, first group 1 cluster is failing and maxFailures is 2",
 			rolloutStrategy: RolloutStrategy{
 				Type: ProgressivePerGroup,
 				ProgressivePerGroup: &RolloutProgressivePerGroup{
-					RolloutConfig: RolloutConfig{ProgressDeadline: "None", MaxFailures: intstr.FromInt(1)},
+					RolloutConfig: RolloutConfig{ProgressDeadline: "None", MaxFailures: intstr.FromInt(2)},
 				},
 			},
 			existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{
@@ -1271,7 +1275,7 @@ func TestGetRolloutCluster_ProgressivePerGroup(t *testing.T) {
 			expectRolloutStrategy: &RolloutStrategy{
 				Type: ProgressivePerGroup,
 				ProgressivePerGroup: &RolloutProgressivePerGroup{
-					RolloutConfig: RolloutConfig{ProgressDeadline: "None", MaxFailures: intstr.FromInt(1)},
+					RolloutConfig: RolloutConfig{ProgressDeadline: "None", MaxFailures: intstr.FromInt(2)},
 				},
 			},
 			existingWorkloads: []dummyWorkload{
@@ -1348,6 +1352,7 @@ func TestGetRolloutCluster_ProgressivePerGroup(t *testing.T) {
 					{ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Failed, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTimeMax_120s},
 					{ClusterName: "cluster3", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Failed, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTimeMax_120s},
 				},
+				MaxFailureBreach: true,
 			},
 		},
 		{
@@ -1404,10 +1409,11 @@ func TestGetRolloutCluster_ProgressivePerGroup(t *testing.T) {
 					{ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: TimeOut, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTime_30s},
 					{ClusterName: "cluster3", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: TimeOut, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTime_30s},
 				},
+				MaxFailureBreach: true,
 			},
 		},
 		{
-			name: "test progressivePerGroup rollout with mandatoryDecisionGroup failing 1 cluster, timeout 90s and with maxFailures is 1",
+			name: "test progressivePerGroup rollout with mandatoryDecisionGroup failing 1 cluster, timeout 90s and with maxFailures is 2",
 			rolloutStrategy: RolloutStrategy{
 				Type: ProgressivePerGroup,
 				ProgressivePerGroup: &RolloutProgressivePerGroup{
@@ -1416,7 +1422,7 @@ func TestGetRolloutCluster_ProgressivePerGroup(t *testing.T) {
 							{GroupName: "group1"},
 						},
 					},
-					RolloutConfig: RolloutConfig{ProgressDeadline: "90s", MaxFailures: intstr.FromInt(1)},
+					RolloutConfig: RolloutConfig{ProgressDeadline: "90s", MaxFailures: intstr.FromInt(2)},
 				},
 			},
 			existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{
@@ -1433,7 +1439,7 @@ func TestGetRolloutCluster_ProgressivePerGroup(t *testing.T) {
 							{GroupName: "group1"},
 						},
 					},
-					RolloutConfig: RolloutConfig{ProgressDeadline: "90s", MaxFailures: intstr.FromInt(1)},
+					RolloutConfig: RolloutConfig{ProgressDeadline: "90s", MaxFailures: intstr.FromInt(2)},
 				},
 			},
 			existingWorkloads: []dummyWorkload{
@@ -1460,6 +1466,7 @@ func TestGetRolloutCluster_ProgressivePerGroup(t *testing.T) {
 				ClustersTimeOut: []ClusterRolloutStatus{
 					{ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: TimeOut, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTime_30s},
 				},
+				MaxFailureBreach: true,
 			},
 		},
 		{
@@ -2003,11 +2010,11 @@ func TestGetRolloutCluster_ClusterRemoved(t *testing.T) {
 			},
 		},
 		{
-			name: "test progressivePerGroup rollout with timeout 90s and cluster removed after rollout start while the group timeout.",
+			name: "test progressivePerGroup rollout with timeout 90s and cluster removed after rollout start while the group timeout",
 			rolloutStrategy: RolloutStrategy{
 				Type: ProgressivePerGroup,
 				ProgressivePerGroup: &RolloutProgressivePerGroup{
-					RolloutConfig: RolloutConfig{ProgressDeadline: "90s"},
+					RolloutConfig: RolloutConfig{ProgressDeadline: "90s", MaxFailures: intstr.FromInt(2)},
 				},
 			},
 			existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{
@@ -2223,7 +2230,7 @@ func TestDetermineRolloutStatus(t *testing.T) {
 	RolloutClock = testingclock.NewFakeClock(fakeTime.Time)
 	for _, tc := range testCases {
 		var rolloutClusters, timeoutClusters []ClusterRolloutStatus
-		rolloutClusters, timeoutClusters = determineRolloutStatus(tc.clusterStatus, tc.minSuccessTime, tc.timeout, rolloutClusters, timeoutClusters)
+		rolloutClusters, timeoutClusters = determineRolloutStatus(&tc.clusterStatus, tc.minSuccessTime, tc.timeout, rolloutClusters, timeoutClusters)
 		if !reflect.DeepEqual(rolloutClusters, tc.expectRolloutClusters) {
 			t.Errorf("Case: %v Failed to run NewRolloutHandler.\nExpect rollout clusters: %+v\nActual rollout clusters: %+v", tc.name, tc.expectRolloutClusters, rolloutClusters)
 			return
diff --git a/cluster/v1alpha1/types_rolloutstrategy.go b/cluster/v1alpha1/types_rolloutstrategy.go
index 62e96ccf5..4fbe6524a 100644
--- a/cluster/v1alpha1/types_rolloutstrategy.go
+++ b/cluster/v1alpha1/types_rolloutstrategy.go
@@ -7,7 +7,8 @@ import (
 
 // +k8s:deepcopy-gen=true
 
-// RolloutStrategy API used by workload applier APIs to define how the workload will be applied to the selected clusters by the Placement and DecisionStrategy.
+// RolloutStrategy API used by workload applier APIs to define how the workload will be applied to
+// the selected clusters by the Placement and DecisionStrategy.
 
 type RolloutType string
 
@@ -24,12 +25,13 @@ const (
 type RolloutStrategy struct {
 	// Rollout strategy Types are All, Progressive and ProgressivePerGroup
 	// 1) All means apply the workload to all clusters in the decision groups at once.
-	// 2) Progressive means apply the workload to the selected clusters progressively per cluster. The workload will not
-	//    be applied to the next cluster unless one of the current applied clusters reach the successful state and haven't
-	//    breached the MaxFailures configuration.
-	// 3) ProgressivePerGroup means apply the workload to decisionGroup clusters progressively per group. The workload
-	//    will not be applied to the next decisionGroup unless all clusters in the current group reach the successful
-	//    state and haven't breached the MaxFailures configuration.
+	// 2) Progressive means apply the workload to the selected clusters progressively per cluster. The
+	//    workload will not be applied to the next cluster unless one of the current applied clusters
+	//    reach the successful state and haven't breached the MaxFailures configuration.
+	// 3) ProgressivePerGroup means apply the workload to decisionGroup clusters progressively per
+	//    group. The workload will not be applied to the next decisionGroup unless all clusters in the
+	//    current group reach the successful state and haven't breached the MaxFailures configuration.
+
 	// +kubebuilder:validation:Enum=All;Progressive;ProgressivePerGroup
 	// +kubebuilder:default:=All
 	// +optional
@@ -50,37 +52,42 @@ type RolloutStrategy struct {
 
 // Timeout to consider while applying the workload.
 type RolloutConfig struct {
-	// MinSuccessTime is a "soak" time. In other words, the minimum amount of time the workload applier controller will
-	// wait from the start of each rollout before proceeding (assuming a successful state has been reached and MaxFailures
-	// wasn't breached).
+	// MinSuccessTime is a "soak" time. In other words, the minimum amount of time the workload
+	// applier controller will wait from the start of each rollout before proceeding (assuming a
+	// successful state has been reached and MaxFailures wasn't breached).
 	// MinSuccessTime is only considered for rollout types Progressive and ProgressivePerGroup.
-	// The default value is 0 meaning the workload applier proceeds immediately after a successful state is reached.
+	// The default value is 0 meaning the workload applier proceeds immediately after a successful
+	// state is reached.
 	// MinSuccessTime must be defined in [0-9h]|[0-9m]|[0-9s] format examples; 2h , 90m , 360s
 	// +kubebuilder:default:="0"
 	// +optional
 	MinSuccessTime metav1.Duration `json:"minSuccessTime,omitempty"`
-	// ProgressDeadline defines how long workload applier controller will wait for the workload to reach a successful state in the cluster.
-	// ProgressDeadline default value is "None", meaning the workload applier will wait for a successful state indefinitely.
+	// ProgressDeadline defines how long workload applier controller will wait for the workload to
+	// reach a successful state in the cluster.
+	// ProgressDeadline default value is "None", meaning the workload applier will wait for a
+	// successful state indefinitely.
 	// ProgressDeadline must be defined in [0-9h]|[0-9m]|[0-9s] format examples; 2h , 90m , 360s
 	// +kubebuilder:validation:Pattern="^(([0-9])+[h|m|s])|None$"
 	// +kubebuilder:default:="None"
 	// +optional
 	ProgressDeadline string `json:"progressDeadline,omitempty"`
-	// MaxFailures is a percentage or number of clusters in the current rollout that can fail before proceeding to the
-	// next rollout.
-	// MaxFailures is only considered for rollout types Progressive and ProgressivePerGroup. For Progressive, this is
-	// considered over the total number of clusters. For ProgressivePerGroup, this is considered according to the size of
-	// the current group.
+	// MaxFailures is a percentage or number of clusters in the current rollout that can fail before
+	// proceeding to the next rollout.
+	// MaxFailures is only considered for rollout types Progressive and ProgressivePerGroup. For
+	// Progressive, this is considered over the total number of clusters. For ProgressivePerGroup,
+	// this is considered according to the size of the current group. For both Progressive and
+	// ProgressivePerGroup, the MaxFailures does not apply for MandatoryDecisionGroups, which tolerate
+	// no failures.
 	// Default is that no failures are tolerated.
 	// +kubebuilder:validation:Pattern="^((100|[0-9]{1,2})%|[0-9]+)$"
 	// +kubebuilder:validation:XIntOrString
 	// +kubebuilder:default="0"
 	// +optional
 	MaxFailures intstr.IntOrString `json:"maxFailures,omitempty"`
-	// Timeout defines how long the workload applier controller will wait until the workload reaches a successful state in
-	// the cluster.
-	// Timeout default value is None meaning the workload applier will not proceed apply workload to other clusters if did
-	// not reach the successful state.
+	// Timeout defines how long the workload applier controller will wait until the workload reaches a
+	// successful state in the cluster.
+	// Timeout default value is None meaning the workload applier will not proceed apply workload to
+	// other clusters if did not reach the successful state.
 	// Timeout must be defined in [0-9h]|[0-9m]|[0-9s] format examples; 2h , 90m , 360s
 	//
 	// Deprecated: Use ProgressDeadline instead.
@@ -93,19 +100,23 @@ type RolloutConfig struct {
 // MandatoryDecisionGroup set the decision group name or group index.
 // GroupName is considered first to select the decisionGroups then GroupIndex.
 type MandatoryDecisionGroup struct {
-	// GroupName of the decision group should match the placementDecisions label value with label key cluster.open-cluster-management.io/decision-group-name
+	// GroupName of the decision group should match the placementDecisions label value with label key
+	// cluster.open-cluster-management.io/decision-group-name
 	// +optional
 	GroupName string `json:"groupName,omitempty"`
 
-	// GroupIndex of the decision group should match the placementDecisions label value with label key cluster.open-cluster-management.io/decision-group-index
+	// GroupIndex of the decision group should match the placementDecisions label value with label key
+	// cluster.open-cluster-management.io/decision-group-index
 	// +optional
 	GroupIndex int32 `json:"groupIndex,omitempty"`
 }
 
 // MandatoryDecisionGroups
 type MandatoryDecisionGroups struct {
-	// List of the decision groups names or indexes to apply the workload first and fail if workload did not reach successful state.
-	// GroupName or GroupIndex must match with the decisionGroups defined in the placement's decisionStrategy
+	// List of the decision groups names or indexes to apply the workload first and fail if workload
+	// did not reach successful state.
+	// GroupName or GroupIndex must match with the decisionGroups defined in the placement's
+	// decisionStrategy
 	// +optional
 	MandatoryDecisionGroups []MandatoryDecisionGroup `json:"mandatoryDecisionGroups,omitempty"`
 }
@@ -133,8 +144,9 @@ type RolloutProgressive struct {
 	// +optional
 	MandatoryDecisionGroups `json:",inline"`
 
-	// MaxConcurrency is the max number of clusters to deploy workload concurrently. The default value for MaxConcurrency
-	// is determined from the clustersPerDecisionGroup defined in the placement->DecisionStrategy.
+	// MaxConcurrency is the max number of clusters to deploy workload concurrently. The default value
+	// for MaxConcurrency is determined from the clustersPerDecisionGroup defined in the
+	// placement->DecisionStrategy.
 	// +kubebuilder:validation:Pattern="^((100|[0-9]{1,2})%|[0-9]+)$"
 	// +kubebuilder:validation:XIntOrString
 	// +optional
diff --git a/cluster/v1alpha1/zz_generated.swagger_doc_generated.go b/cluster/v1alpha1/zz_generated.swagger_doc_generated.go
index e983fb5a1..79766e058 100644
--- a/cluster/v1alpha1/zz_generated.swagger_doc_generated.go
+++ b/cluster/v1alpha1/zz_generated.swagger_doc_generated.go
@@ -109,7 +109,7 @@ var map_RolloutConfig = map[string]string{
 	"":                 "Timeout to consider while applying the workload.",
 	"minSuccessTime":   "MinSuccessTime is a \"soak\" time. In other words, the minimum amount of time the workload applier controller will wait from the start of each rollout before proceeding (assuming a successful state has been reached and MaxFailures wasn't breached). MinSuccessTime is only considered for rollout types Progressive and ProgressivePerGroup. The default value is 0 meaning the workload applier proceeds immediately after a successful state is reached. MinSuccessTime must be defined in [0-9h]|[0-9m]|[0-9s] format examples; 2h , 90m , 360s",
 	"progressDeadline": "ProgressDeadline defines how long workload applier controller will wait for the workload to reach a successful state in the cluster. ProgressDeadline default value is \"None\", meaning the workload applier will wait for a successful state indefinitely. ProgressDeadline must be defined in [0-9h]|[0-9m]|[0-9s] format examples; 2h , 90m , 360s",
-	"maxFailures":      "MaxFailures is a percentage or number of clusters in the current rollout that can fail before proceeding to the next rollout. MaxFailures is only considered for rollout types Progressive and ProgressivePerGroup. For Progressive, this is considered over the total number of clusters. For ProgressivePerGroup, this is considered according to the size of the current group. Default is that no failures are tolerated.",
+	"maxFailures":      "MaxFailures is a percentage or number of clusters in the current rollout that can fail before proceeding to the next rollout. MaxFailures is only considered for rollout types Progressive and ProgressivePerGroup. For Progressive, this is considered over the total number of clusters. For ProgressivePerGroup, this is considered according to the size of the current group. For both Progressive and ProgressivePerGroup, the MaxFailures does not apply for MandatoryDecisionGroups, which tolerate no failures. Default is that no failures are tolerated.",
 	"timeout":          "Timeout defines how long the workload applier controller will wait until the workload reaches a successful state in the cluster. Timeout default value is None meaning the workload applier will not proceed apply workload to other clusters if did not reach the successful state. Timeout must be defined in [0-9h]|[0-9m]|[0-9s] format examples; 2h , 90m , 360s\n\nDeprecated: Use ProgressDeadline instead.",
 }
 
@@ -136,7 +136,6 @@ func (RolloutProgressivePerGroup) SwaggerDoc() map[string]string {
 
 var map_RolloutStrategy = map[string]string{
 	"":                    "Rollout strategy to apply workload to the selected clusters by Placement and DecisionStrategy.",
-	"type":                "Rollout strategy Types are All, Progressive and ProgressivePerGroup 1) All means apply the workload to all clusters in the decision groups at once. 2) Progressive means apply the workload to the selected clusters progressively per cluster. The workload will not\n be applied to the next cluster unless one of the current applied clusters reach the successful state and haven't\n breached the MaxFailures configuration.\n3) ProgressivePerGroup means apply the workload to decisionGroup clusters progressively per group. The workload\n will not be applied to the next decisionGroup unless all clusters in the current group reach the successful\n state and haven't breached the MaxFailures configuration.",
 	"all":                 "All defines required fields for RolloutStrategy type All",
 	"progressive":         "Progressive defines required fields for RolloutStrategy type Progressive",
 	"progressivePerGroup": "ProgressivePerGroup defines required fields for RolloutStrategy type ProgressivePerGroup",
diff --git a/work/v1alpha1/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml b/work/v1alpha1/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml
index b437dcf6e..c5a083042 100644
--- a/work/v1alpha1/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml
+++ b/work/v1alpha1/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml
@@ -349,7 +349,10 @@ spec:
                           For Progressive, this is considered over the total
                           number of clusters. For ProgressivePerGroup, this
                           is considered according to the size of the current
-                          group. Default is that no failures are tolerated.
+                          group. For both Progressive and ProgressivePerGroup,
+                          the MaxFailures does not apply for MandatoryDecisionGroups,
+                          which tolerate no failures. Default is that no failures
+                          are tolerated.
                         pattern: ^((100|[0-9]{1,2})%|[0-9]+)$
                         x-kubernetes-int-or-string: true
                       minSuccessTime:
@@ -440,7 +443,10 @@
                           For Progressive, this is considered over the total
                           number of clusters. For ProgressivePerGroup, this
                           is considered according to the size of the current
-                          group. Default is that no failures are tolerated.
+                          group. For both Progressive and ProgressivePerGroup,
+                          the MaxFailures does not apply for MandatoryDecisionGroups,
+                          which tolerate no failures. Default is that no failures
+                          are tolerated.
                         pattern: ^((100|[0-9]{1,2})%|[0-9]+)$
                         x-kubernetes-int-or-string: true
                       minSuccessTime:
@@ -521,7 +527,10 @@
                           For Progressive, this is considered over the total
                           number of clusters. For ProgressivePerGroup, this
                           is considered according to the size of the current
-                          group. Default is that no failures are tolerated.
+                          group. For both Progressive and ProgressivePerGroup,
+                          the MaxFailures does not apply for MandatoryDecisionGroups,
+                          which tolerate no failures. Default is that no failures
+                          are tolerated.
                         pattern: ^((100|[0-9]{1,2})%|[0-9]+)$
                         x-kubernetes-int-or-string: true
                       minSuccessTime:
@@ -564,18 +573,6 @@ spec:
                         type: object
                     type:
                       default: All
-                      description: Rollout strategy Types are All, Progressive
-                        and ProgressivePerGroup 1) All means apply the workload
-                        to all clusters in the decision groups at once. 2) Progressive
-                        means apply the workload to the selected clusters progressively
-                        per cluster. The workload will not be applied to the next
-                        cluster unless one of the current applied clusters reach
-                        the successful state and haven't breached the MaxFailures
-                        configuration. 3) ProgressivePerGroup means apply the
-                        workload to decisionGroup clusters progressively per group.
-                        The workload will not be applied to the next decisionGroup
-                        unless all clusters in the current group reach the successful
-                        state and haven't breached the MaxFailures configuration.
                       enum:
                       - All
                       - Progressive