diff --git a/deploy/crds/shipwright.io_buildruns.yaml b/deploy/crds/shipwright.io_buildruns.yaml index 5d6b1cf33c..6542d7c2a9 100644 --- a/deploy/crds/shipwright.io_buildruns.yaml +++ b/deploy/crds/shipwright.io_buildruns.yaml @@ -352,6 +352,30 @@ spec: - name type: object type: array + retention: + description: Contains information about retention params + properties: + failedLimit: + description: FailedLimit defines the maximum number of failed + buildruns that should exist. + minimum: 1 + type: integer + succeededLimit: + description: SucceededLimit defines the maximum number of + succeeded buildruns that should exist. + minimum: 1 + type: integer + ttlAfterFailed: + description: TTLAfterFailed defines the maximum duration of + time the failed buildrun should exist. + format: duration + type: string + ttlAfterSucceeded: + description: TTLAfterSucceeded defines the maximum duration + of time the succeeded buildrun should exist. + format: duration + type: string + type: object source: description: Source refers to the Git repository containing the source code to be built. @@ -686,6 +710,20 @@ spec: - name type: object type: array + retention: + description: Contains information about retention params + properties: + ttlAfterFailed: + description: TTLAfterFailed defines the maximum duration of time + the failed buildrun should exist. + format: duration + type: string + ttlAfterSucceeded: + description: TTLAfterSucceeded defines the maximum duration of + time the succeeded buildrun should exist. + format: duration + type: string + type: object serviceAccount: description: ServiceAccount refers to the kubernetes serviceaccount which is used for resource control. Default serviceaccount will @@ -1020,6 +1058,30 @@ spec: - name type: object type: array + retention: + description: Contains information about retention params + properties: + failedLimit: + description: FailedLimit defines the maximum number of failed + buildruns that should exist. + minimum: 1 + type: integer + succeededLimit: + description: SucceededLimit defines the maximum number of + succeeded buildruns that should exist. + minimum: 1 + type: integer + ttlAfterFailed: + description: TTLAfterFailed defines the maximum duration of + time the failed buildrun should exist. + format: duration + type: string + ttlAfterSucceeded: + description: TTLAfterSucceeded defines the maximum duration + of time the succeeded buildrun should exist. + format: duration + type: string + type: object source: description: Source refers to the Git repository containing the source code to be built. diff --git a/deploy/crds/shipwright.io_builds.yaml b/deploy/crds/shipwright.io_builds.yaml index 508b7bc1c8..77eb0df1fd 100644 --- a/deploy/crds/shipwright.io_builds.yaml +++ b/deploy/crds/shipwright.io_builds.yaml @@ -331,6 +331,30 @@ spec: - name type: object type: array + retention: + description: Contains information about retention params + properties: + failedLimit: + description: FailedLimit defines the maximum number of failed + buildruns that should exist. + minimum: 1 + type: integer + succeededLimit: + description: SucceededLimit defines the maximum number of succeeded + buildruns that should exist. + minimum: 1 + type: integer + ttlAfterFailed: + description: TTLAfterFailed defines the maximum duration of time + the failed buildrun should exist. + format: duration + type: string + ttlAfterSucceeded: + description: TTLAfterSucceeded defines the maximum duration of + time the succeeded buildrun should exist. 
+ format: duration + type: string + type: object source: description: Source refers to the Git repository containing the source code to be built. diff --git a/docs/build.md b/docs/build.md index e834a36808..0345023181 100644 --- a/docs/build.md +++ b/docs/build.md @@ -15,6 +15,7 @@ SPDX-License-Identifier: Apache-2.0 - [Defining ParamValues](#defining-paramvalues) - [Defining the Builder or Dockerfile](#defining-the-builder-or-dockerfile) - [Defining the Output](#defining-the-output) + - [Defining Retention Parameters](#defining-retention-parameters) - [BuildRun deletion](#BuildRun-deletion) ## Overview @@ -29,6 +30,7 @@ A `Build` resource allows the user to define: - dockerfile - output - env +- retention A `Build` is available within a namespace. @@ -87,7 +89,11 @@ The `Build` definition supports the following fields: - `metadata.annotations[build.shipwright.io/build-run-deletion]` - Defines if delete all related BuildRuns when deleting the Build. The default is `false`. - `spec.output.annotations` - Refers to a list of `key/value` that could be used to [annotate](https://github.com/opencontainers/image-spec/blob/main/annotations.md) the output image. - `spec.output.labels` - Refers to a list of `key/value` that could be used to label the output image. - - `spec.env` - Specifies additional environment variables that should be passed to the build container. The available variables depend on the tool used by the chosen build strategy. + - `spec.env` - Specifies additional environment variables that should be passed to the build container. The available variables depend on the tool that is being used by the chosen build strategy. + - `spec.retention.ttlAfterFailed` - Specifies the duration for which a failed buildrun can exist. + - `spec.retention.ttlAfterSucceeded` - Specifies the duration for which a successful buildrun can exist. + - `spec.retention.failedLimit` - Specifies the number of failed buildrun that can exist. + - `spec.retention.succeededLimit` - Specifies the number of successful buildrun can exist. ### Defining the Source @@ -530,6 +536,41 @@ You can verify which labels were added to the output image that is available on docker inspect us.icr.io/source-to-image-build/nodejs-ex | jq ".[].Config.Labels" ``` +### Defining Retention Parameters + +A `Build` resource can specify how long a completed BuildRun can exist and the number of buildruns that have failed or succeeded that should exist. Instead of manually cleaning up old BuildRuns, retention parameters provide an alternate method for cleaning up BuildRuns automatically. + +As part of the retention parameters, we have the following fields: + +- `retention.succeededLimit` - Defines number of succeeded BuildRuns for a Build that can exist. +- `retention.failedLimit` - Defines number of failed BuildRuns for a Build that can exist. +- `retention.ttlAfterFailed` - Specifies the duration for which a failed buildrun can exist. +- `retention.ttlAfterSucceeded` - Specifies the duration for which a successful buildrun can exist. + +An example of a user using both TTL and Limit retention fields. In case of such a configuration, BuildRun will get deleted once the first criteria is met. + +```yaml + apiVersion: shipwright.io/v1alpha1 + kind: Build + metadata: + name: build-retention-ttl + spec: + source: + url: "https://github.com/shipwright-io/sample-go" + contextDir: docker-build + strategy: + kind: ClusterBuildStrategy + output: + ... 
diff --git a/docs/buildrun.md b/docs/buildrun.md
index c1b941b274..4505e3f5be 100644
--- a/docs/buildrun.md
+++ b/docs/buildrun.md
@@ -13,7 +13,9 @@ SPDX-License-Identifier: Apache-2.0
   - [Defining the BuildSpec](#defining-the-buildspec)
   - [Defining ParamValues](#defining-paramvalues)
   - [Defining the ServiceAccount](#defining-the-serviceaccount)
+  - [Defining Retention Parameters](#defining-retention-parameters)
 - [Canceling a `BuildRun`](#canceling-a-buildrun)
+- [Automatic `BuildRun` deletion](#automatic-buildrun-deletion)
 - [Specifying Environment Variables](#specifying-environment-variables)
 - [BuildRun Status](#buildrun-status)
   - [Understanding the state of a BuildRun](#understanding-the-state-of-a-buildrun)
@@ -166,6 +168,32 @@ You can also use set the `spec.serviceAccount.generate` path to `true`. This wil
 
 _**Note**_: When the service account is not defined, the `BuildRun` uses the `pipeline` service account if it exists in the namespace, and falls back to the `default` service account.
 
+### Defining Retention Parameters
+
+A `BuildRun` resource can specify how long a completed BuildRun can exist. Instead of requiring you to clean up old BuildRuns manually, retention parameters provide an automatic cleanup mechanism.
+
+As part of the buildrun retention parameters, we have the following fields:
+
+- `retention.ttlAfterFailed` - Specifies the duration for which a failed buildrun can exist.
+- `retention.ttlAfterSucceeded` - Specifies the duration for which a successful buildrun can exist.
+
+The following example shows a buildrun that uses the TTL parameters:
+
+```yaml
+apiVersion: shipwright.io/v1alpha1
+kind: BuildRun
+metadata:
+  name: buildrun-retention-ttl
+spec:
+  buildRef:
+    name: build-retention-ttl
+  retention:
+    ttlAfterFailed: 10m
+    ttlAfterSucceeded: 10m
+```
+
+**NOTE**: If TTL values are defined in both the buildrun and the build specifications, priority is given to the values in the buildrun specification.
+
 ## Canceling a `BuildRun`
 
 To cancel a `BuildRun` that's currently executing, update its status to mark it as canceled.
@@ -184,6 +212,20 @@ spec:
   state: "BuildRunCanceled"
 ```
 
+## Automatic `BuildRun` deletion
+
+Two controllers ensure that buildruns can be deleted automatically if required. This is enabled by adding `retention` parameters to either the build or the buildrun specification.
+
+- Buildrun TTL parameters: These make sure that a buildrun only exists for a fixed duration of time after completion.
+  - `buildrun.spec.retention.ttlAfterFailed`: The buildrun is deleted once the given duration has passed and the buildrun has failed.
+  - `buildrun.spec.retention.ttlAfterSucceeded`: The buildrun is deleted once the given duration has passed and the buildrun has succeeded.
+- Build TTL parameters: These make sure that related buildruns only exist for a fixed duration of time after completion.
+  - `build.spec.retention.ttlAfterFailed`: A related buildrun is deleted once the given duration has passed and the buildrun has failed.
+  - `build.spec.retention.ttlAfterSucceeded`: A related buildrun is deleted once the given duration has passed and the buildrun has succeeded.
+- Build limit parameters: These limit the number of completed buildruns that can exist for a build.
+  - `build.spec.retention.succeededLimit` - Defines the number of succeeded BuildRuns for a Build that can exist.
+  - `build.spec.retention.failedLimit` - Defines the number of failed BuildRuns for a Build that can exist.
+
 ## Specifying Environment Variables
 
 An example of a `BuildRun` that specifies environment variables:
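Reviewer aside, not part of the diff: the precedence rule stated above ("priority is given to the values in the buildrun specification") is exactly what the new buildrunttlcleanup reconciler implements further down. It is condensed here into a standalone helper for review; the function name is mine, not the PR's.

```go
// effectiveTTL resolves which TTL applies to a completed BuildRun. A TTL in
// the BuildRun spec wins over one inherited from the Build, which the
// controller reads from the copy recorded in br.Status.BuildSpec.
func effectiveTTL(br *buildv1alpha1.BuildRun, succeeded bool) *metav1.Duration {
	if succeeded {
		if br.Spec.Retention != nil && br.Spec.Retention.TTLAfterSucceeded != nil {
			return br.Spec.Retention.TTLAfterSucceeded
		}
		if br.Status.BuildSpec != nil && br.Status.BuildSpec.Retention != nil {
			return br.Status.BuildSpec.Retention.TTLAfterSucceeded
		}
		return nil
	}
	if br.Spec.Retention != nil && br.Spec.Retention.TTLAfterFailed != nil {
		return br.Spec.Retention.TTLAfterFailed
	}
	if br.Status.BuildSpec != nil && br.Status.BuildSpec.Retention != nil {
		return br.Status.BuildSpec.Retention.TTLAfterFailed
	}
	return nil
}
```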
diff --git a/pkg/apis/build/v1alpha1/build_types.go b/pkg/apis/build/v1alpha1/build_types.go
index 7e1882268d..adbac319cd 100644
--- a/pkg/apis/build/v1alpha1/build_types.go
+++ b/pkg/apis/build/v1alpha1/build_types.go
@@ -138,6 +138,11 @@ type BuildSpec struct {
 	// Env contains additional environment variables that should be passed to the build container
 	// +optional
 	Env []corev1.EnvVar `json:"env,omitempty"`
+
+	// Contains information about retention params
+	//
+	// +optional
+	Retention *BuildRetention `json:"retention,omitempty"`
 }
 
 // StrategyName returns the name of the configured strategy, or 'undefined' in
@@ -215,6 +220,30 @@ type BuildList struct {
 	Items []Build `json:"items"`
 }
 
+// BuildRetention struct for buildrun cleanup
+type BuildRetention struct {
+	// FailedLimit defines the maximum number of failed buildruns that should exist.
+	//
+	// +optional
+	// +kubebuilder:validation:Minimum=1
+	FailedLimit *uint `json:"failedLimit,omitempty"`
+	// SucceededLimit defines the maximum number of succeeded buildruns that should exist.
+	//
+	// +optional
+	// +kubebuilder:validation:Minimum=1
+	SucceededLimit *uint `json:"succeededLimit,omitempty"`
+	// TTLAfterFailed defines the maximum duration of time the failed buildrun should exist.
+	//
+	// +optional
+	// +kubebuilder:validation:Format=duration
+	TTLAfterFailed *metav1.Duration `json:"ttlAfterFailed,omitempty"`
+	// TTLAfterSucceeded defines the maximum duration of time the succeeded buildrun should exist.
+	//
+	// +optional
+	// +kubebuilder:validation:Format=duration
+	TTLAfterSucceeded *metav1.Duration `json:"ttlAfterSucceeded,omitempty"`
+}
+
 func init() {
 	SchemeBuilder.Register(&Build{}, &BuildList{})
 }
diff --git a/pkg/apis/build/v1alpha1/buildrun_types.go b/pkg/apis/build/v1alpha1/buildrun_types.go
index 4a01e6e20d..9011576c3e 100644
--- a/pkg/apis/build/v1alpha1/buildrun_types.go
+++ b/pkg/apis/build/v1alpha1/buildrun_types.go
@@ -72,6 +72,10 @@ type BuildRunSpec struct {
 	//
 	// +optional
 	Env []corev1.EnvVar `json:"env,omitempty"`
+
+	// Contains information about retention params
+	// +optional
+	Retention *BuildRunRetention `json:"retention,omitempty"`
 }
 
 // BuildRunRequestedState defines the buildrun state the user can provide to override whatever is the current state.
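Reviewer aside, not part of the diff: with the new `Retention` field wired into `BuildRunSpec`, a client could populate it as sketched below. The `BuildRunRetention` type is added in the next hunk; the object and Build names are illustrative only.

```go
import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	buildv1alpha1 "github.com/shipwright-io/build/pkg/apis/build/v1alpha1"
)

// Illustrative only: a BuildRun that is kept for at most ten minutes
// after completion, whether it fails or succeeds.
func newRetainedBuildRun() *buildv1alpha1.BuildRun {
	return &buildv1alpha1.BuildRun{
		ObjectMeta: metav1.ObjectMeta{Name: "my-buildrun"},
		Spec: buildv1alpha1.BuildRunSpec{
			BuildRef: &buildv1alpha1.BuildRef{Name: "my-build"},
			Retention: &buildv1alpha1.BuildRunRetention{
				TTLAfterFailed:    &metav1.Duration{Duration: 10 * time.Minute},
				TTLAfterSucceeded: &metav1.Duration{Duration: 10 * time.Minute},
			},
		},
	}
}
```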
@@ -296,6 +300,20 @@ type Condition struct { Message string `json:"message" description:"human-readable message indicating details about last transition"` } +// BuildRunRetention struct for buildrun cleanup +type BuildRunRetention struct { + // TTLAfterFailed defines the maximum duration of time the failed buildrun should exist. + // + // +optional + // +kubebuilder:validation:Format=duration + TTLAfterFailed *metav1.Duration `json:"ttlAfterFailed,omitempty"` + // TTLAfterSucceeded defines the maximum duration of time the succeeded buildrun should exist. + // + // +optional + // +kubebuilder:validation:Format=duration + TTLAfterSucceeded *metav1.Duration `json:"ttlAfterSucceeded,omitempty"` +} + func init() { SchemeBuilder.Register(&BuildRun{}, &BuildRunList{}) } diff --git a/pkg/apis/build/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/build/v1alpha1/zz_generated.deepcopy.go index dab8d4977f..a063404cc0 100644 --- a/pkg/apis/build/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/build/v1alpha1/zz_generated.deepcopy.go @@ -97,6 +97,42 @@ func (in *BuildRef) DeepCopy() *BuildRef { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildRetention) DeepCopyInto(out *BuildRetention) { + *out = *in + if in.FailedLimit != nil { + in, out := &in.FailedLimit, &out.FailedLimit + *out = new(uint) + **out = **in + } + if in.SucceededLimit != nil { + in, out := &in.SucceededLimit, &out.SucceededLimit + *out = new(uint) + **out = **in + } + if in.TTLAfterFailed != nil { + in, out := &in.TTLAfterFailed, &out.TTLAfterFailed + *out = new(v1.Duration) + **out = **in + } + if in.TTLAfterSucceeded != nil { + in, out := &in.TTLAfterSucceeded, &out.TTLAfterSucceeded + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildRetention. +func (in *BuildRetention) DeepCopy() *BuildRetention { + if in == nil { + return nil + } + out := new(BuildRetention) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BuildRun) DeepCopyInto(out *BuildRun) { *out = *in @@ -158,6 +194,32 @@ func (in *BuildRunList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildRunRetention) DeepCopyInto(out *BuildRunRetention) { + *out = *in + if in.TTLAfterFailed != nil { + in, out := &in.TTLAfterFailed, &out.TTLAfterFailed + *out = new(v1.Duration) + **out = **in + } + if in.TTLAfterSucceeded != nil { + in, out := &in.TTLAfterSucceeded, &out.TTLAfterSucceeded + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildRunRetention. +func (in *BuildRunRetention) DeepCopy() *BuildRunRetention { + if in == nil { + return nil + } + out := new(BuildRunRetention) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
 func (in *BuildRunSpec) DeepCopyInto(out *BuildRunSpec) {
 	*out = *in
@@ -212,6 +274,11 @@ func (in *BuildRunSpec) DeepCopyInto(out *BuildRunSpec) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
+	if in.Retention != nil {
+		in, out := &in.Retention, &out.Retention
+		*out = new(BuildRunRetention)
+		(*in).DeepCopyInto(*out)
+	}
 	return
 }
 
@@ -351,6 +418,11 @@ func (in *BuildSpec) DeepCopyInto(out *BuildSpec) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
+	if in.Retention != nil {
+		in, out := &in.Retention, &out.Retention
+		*out = new(BuildRetention)
+		(*in).DeepCopyInto(*out)
+	}
 	return
 }
 
diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go
index 5901d69472..656f134b01 100644
--- a/pkg/controller/controller.go
+++ b/pkg/controller/controller.go
@@ -16,7 +16,9 @@ import (
 	"github.com/shipwright-io/build/pkg/config"
 	"github.com/shipwright-io/build/pkg/ctxlog"
 	"github.com/shipwright-io/build/pkg/reconciler/build"
+	"github.com/shipwright-io/build/pkg/reconciler/buildlimitcleanup"
 	"github.com/shipwright-io/build/pkg/reconciler/buildrun"
+	"github.com/shipwright-io/build/pkg/reconciler/buildrunttlcleanup"
 	"github.com/shipwright-io/build/pkg/reconciler/buildstrategy"
 	"github.com/shipwright-io/build/pkg/reconciler/clusterbuildstrategy"
 )
@@ -63,5 +65,13 @@ func NewManager(ctx context.Context, config *config.Config, cfg *rest.Config, op
 		return nil, err
 	}
 
+	if err := buildlimitcleanup.Add(ctx, config, mgr); err != nil {
+		return nil, err
+	}
+
+	if err := buildrunttlcleanup.Add(ctx, config, mgr); err != nil {
+		return nil, err
+	}
+
 	return mgr, nil
 }
diff --git a/pkg/reconciler/buildlimitcleanup/build_limit_cleanup.go b/pkg/reconciler/buildlimitcleanup/build_limit_cleanup.go
new file mode 100644
index 0000000000..69bca8008a
--- /dev/null
+++ b/pkg/reconciler/buildlimitcleanup/build_limit_cleanup.go
@@ -0,0 +1,146 @@
+// Copyright The Shipwright Contributors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package buildlimitcleanup
+
+import (
+	"context"
+	"sort"
+
+	build "github.com/shipwright-io/build/pkg/apis/build/v1alpha1"
+	"github.com/shipwright-io/build/pkg/config"
+	"github.com/shipwright-io/build/pkg/ctxlog"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/labels"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+// ReconcileBuild reconciles a Build object
+type ReconcileBuild struct {
+	// This client, initialized using mgr.Client() above, is a split client
+	// that reads objects from the cache and writes to the apiserver
+	config *config.Config
+	client client.Client
+}
+
+func NewReconciler(c *config.Config, mgr manager.Manager) reconcile.Reconciler {
+	return &ReconcileBuild{
+		config: c,
+		client: mgr.GetClient(),
+	}
+}
+
+// Reconcile looks at the retention fields of a Build and makes sure that the
+// number of corresponding BuildRuns adheres to the configured limits
+func (r *ReconcileBuild) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
+	// Set the ctx to be Background, as the top-level context for incoming requests.
+	ctx, cancel := context.WithTimeout(ctx, r.config.CtxTimeOut)
+	defer cancel()
+
+	ctxlog.Debug(ctx, "Start reconciling build-limit-cleanup", namespace, request.Namespace, name, request.Name)
+
+	b := &build.Build{}
+	err := r.client.Get(ctx, request.NamespacedName, b)
+
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			ctxlog.Debug(ctx, "Finish reconciling build-limit-cleanup.
Build was not found", namespace, request.Namespace, name, request.Name) + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + // early exit if there is no retention section + if b.Spec.Retention == nil { + return reconcile.Result{}, nil + } + + // early exit if retention section has no limit + if b.Spec.Retention.SucceededLimit == nil && b.Spec.Retention.FailedLimit == nil { + return reconcile.Result{}, nil + } + + lbls := map[string]string{ + build.LabelBuild: b.Name, + } + opts := client.ListOptions{ + Namespace: b.Namespace, + LabelSelector: labels.SelectorFromSet(lbls), + } + allBuildRuns := &build.BuildRunList{} + + err = r.client.List(ctx, allBuildRuns, &opts) + if err != nil { + return reconcile.Result{}, err + } + + if len(allBuildRuns.Items) == 0 { + return reconcile.Result{}, nil + } + + var buildRunFailed []build.BuildRun + var buildRunSucceeded []build.BuildRun + + // Sort buildruns into successful ones and failed ones + for _, br := range allBuildRuns.Items { + condition := br.Status.GetCondition(build.Succeeded) + if condition != nil { + if condition.Status == corev1.ConditionFalse { + buildRunFailed = append(buildRunFailed, br) + } else if condition.Status == corev1.ConditionTrue { + buildRunSucceeded = append(buildRunSucceeded, br) + } + } + } + + // Check limits and delete oldest buildruns if limit is reached. + if b.Spec.Retention.SucceededLimit != nil { + if len(buildRunSucceeded) > int(*b.Spec.Retention.SucceededLimit) { + // Sort buildruns with oldest one at the beginning + sort.Slice(buildRunSucceeded, func(i, j int) bool { + return buildRunSucceeded[i].ObjectMeta.CreationTimestamp.Before(&buildRunSucceeded[j].ObjectMeta.CreationTimestamp) + }) + lenOfList := len(buildRunSucceeded) + for i := 0; i < lenOfList-int(*b.Spec.Retention.SucceededLimit); i++ { + ctxlog.Info(ctx, "Deleting succeeded buildrun as cleanup limit has been reached.", namespace, request.Namespace, name, buildRunSucceeded[i].Name) + err := r.client.Delete(ctx, &buildRunSucceeded[i], &client.DeleteOptions{}) + if err != nil { + if !apierrors.IsNotFound(err) { + ctxlog.Debug(ctx, "Error deleting buildRun.", namespace, request.Namespace, name, &buildRunSucceeded[i].Name, "error", err) + return reconcile.Result{}, err + } + ctxlog.Debug(ctx, "Error deleting buildRun. It has already been deleted.", namespace, request.Namespace, name, &buildRunSucceeded[i].Name) + } + } + } + } + + if b.Spec.Retention.FailedLimit != nil { + if len(buildRunFailed) > int(*b.Spec.Retention.FailedLimit) { + // Sort buildruns with oldest one at the beginning + sort.Slice(buildRunFailed, func(i, j int) bool { + return buildRunFailed[i].ObjectMeta.CreationTimestamp.Before(&buildRunFailed[j].ObjectMeta.CreationTimestamp) + }) + lenOfList := len(buildRunFailed) + for i := 0; i < lenOfList-int(*b.Spec.Retention.FailedLimit); i++ { + ctxlog.Info(ctx, "Deleting failed buildrun as cleanup limit has been reached.", namespace, request.Namespace, name, buildRunFailed[i].Name) + err := r.client.Delete(ctx, &buildRunFailed[i], &client.DeleteOptions{}) + if err != nil { + if !apierrors.IsNotFound(err) { + ctxlog.Debug(ctx, "Error deleting buildRun.", namespace, request.Namespace, name, &buildRunFailed[i].Name, "error", err) + return reconcile.Result{}, err + } + ctxlog.Debug(ctx, "Error deleting buildRun. 
It has already been deleted.", namespace, request.Namespace, name, &buildRunFailed[i].Name) + } + } + } + } + + ctxlog.Debug(ctx, "finishing reconciling request from a Build or BuildRun event", namespace, request.Namespace, name, request.Name) + + return reconcile.Result{}, nil +} diff --git a/pkg/reconciler/buildlimitcleanup/controller.go b/pkg/reconciler/buildlimitcleanup/controller.go new file mode 100644 index 0000000000..77e72ee70e --- /dev/null +++ b/pkg/reconciler/buildlimitcleanup/controller.go @@ -0,0 +1,135 @@ +// Copyright The Shipwright Contributors +// +// SPDX-License-Identifier: Apache-2.0 + +package buildlimitcleanup + +import ( + "context" + + buildv1alpha1 "github.com/shipwright-io/build/pkg/apis/build/v1alpha1" + "github.com/shipwright-io/build/pkg/config" + "github.com/shipwright-io/build/pkg/ctxlog" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +const ( + namespace string = "namespace" + name string = "name" +) + +// Add creates a new build_limit_cleanup Controller and adds it to the Manager. The Manager will set fields on the Controller +// and Start it when the Manager is Started +func Add(ctx context.Context, c *config.Config, mgr manager.Manager) error { + ctx = ctxlog.NewContext(ctx, "build-limit-cleanup-controller") + return add(ctx, mgr, NewReconciler(c, mgr), c.Controllers.Build.MaxConcurrentReconciles) +} +func add(ctx context.Context, mgr manager.Manager, r reconcile.Reconciler, maxConcurrentReconciles int) error { + // Create the controller options + options := controller.Options{ + Reconciler: r, + } + + if maxConcurrentReconciles > 0 { + options.MaxConcurrentReconciles = maxConcurrentReconciles + } + + // Create a new controller + c, err := controller.New("build-limit-cleanup-controller", mgr, options) + if err != nil { + return err + } + + pred := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + o := e.Object.(*buildv1alpha1.Build) + return o.Spec.Retention != nil && (o.Spec.Retention.FailedLimit != nil || o.Spec.Retention.SucceededLimit != nil) + }, + UpdateFunc: func(e event.UpdateEvent) bool { + n := e.ObjectNew.(*buildv1alpha1.Build) + o := e.ObjectOld.(*buildv1alpha1.Build) + + // Check to see if there are new retention parameters or whether the + // limit values have decreased + if o.Spec.Retention == nil && n.Spec.Retention != nil { + if n.Spec.Retention.FailedLimit != nil || n.Spec.Retention.SucceededLimit != nil { + return true + } + } else if n.Spec.Retention != nil && o.Spec.Retention != nil { + if n.Spec.Retention.FailedLimit != nil && o.Spec.Retention.FailedLimit == nil { + return true + } else if n.Spec.Retention.SucceededLimit != nil && o.Spec.Retention.SucceededLimit == nil { + return true + } else if n.Spec.Retention.FailedLimit != nil && o.Spec.Retention.FailedLimit != nil && int(*n.Spec.Retention.FailedLimit) < int(*o.Spec.Retention.FailedLimit) { + return true + } else if n.Spec.Retention.SucceededLimit != nil && o.Spec.Retention.SucceededLimit != nil && int(*n.Spec.Retention.SucceededLimit) < int(*o.Spec.Retention.SucceededLimit) { + return true + } + } + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + // 
Never reconcile on deletion, there is nothing we have to do + return false + }, + } + + predBuildRun := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + // Never reconcile in case of create buildrun event + return false + }, + // Reconcile the build the related buildrun has just completed + UpdateFunc: func(e event.UpdateEvent) bool { + n := e.ObjectNew.(*buildv1alpha1.BuildRun) + + // check if Buildrun is related to a build + if n.Spec.BuildRef == nil { + return false + } + + o := e.ObjectOld.(*buildv1alpha1.BuildRun) + oldCondition := o.Status.GetCondition(buildv1alpha1.Succeeded) + newCondition := n.Status.GetCondition(buildv1alpha1.Succeeded) + if newCondition != nil { + if (oldCondition == nil || oldCondition.Status == corev1.ConditionUnknown) && + (newCondition.Status == corev1.ConditionFalse || newCondition.Status == corev1.ConditionTrue) { + return true + } + } + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + // Never reconcile on deletion, there is nothing we have to do + return false + }, + } + + // Watch for changes to primary resource Build + if err = c.Watch(&source.Kind{Type: &buildv1alpha1.Build{}}, &handler.EnqueueRequestForObject{}, pred); err != nil { + return err + } + + // Watch for changes to resource BuildRun + return c.Watch(&source.Kind{Type: &buildv1alpha1.BuildRun{}}, handler.EnqueueRequestsFromMapFunc(func(o client.Object) []reconcile.Request { + buildRun := o.(*buildv1alpha1.BuildRun) + + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Name: buildRun.Spec.BuildRef.Name, + Namespace: buildRun.Namespace, + }, + }, + } + }), predBuildRun) +} diff --git a/pkg/reconciler/buildrunttlcleanup/buildrun_ttl_cleanup.go b/pkg/reconciler/buildrunttlcleanup/buildrun_ttl_cleanup.go new file mode 100644 index 0000000000..b1f581e7fd --- /dev/null +++ b/pkg/reconciler/buildrunttlcleanup/buildrun_ttl_cleanup.go @@ -0,0 +1,99 @@ +// Copyright The Shipwright Contributors +// +// SPDX-License-Identifier: Apache-2.0 + +package buildrunttlcleanup + +import ( + "context" + "time" + + buildv1alpha1 "github.com/shipwright-io/build/pkg/apis/build/v1alpha1" + "github.com/shipwright-io/build/pkg/config" + "github.com/shipwright-io/build/pkg/ctxlog" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// ReconcileBuildRun reconciles a BuildRun object +type ReconcileBuildRun struct { + // This client, initialized using mgr.Client() above, is a split client + // that reads objects from the cache and writes to the apiserver + config *config.Config + client client.Client +} + +func NewReconciler(c *config.Config, mgr manager.Manager) reconcile.Reconciler { + return &ReconcileBuildRun{ + config: c, + client: mgr.GetClient(), + } +} + +// Reconcile makes sure the buildrun adheres to its ttl retention field and deletes it +// once the ttl limit is hit +func (r *ReconcileBuildRun) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + // Set the ctx to be Background, as the top-level context for incoming requests. 
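+	// Overall flow: fetch the BuildRun, skip it while it is still running,
+	// resolve the applicable TTL (the BuildRun spec takes precedence over the
+	// Build spec recorded in status.BuildSpec), then either delete the
+	// BuildRun or requeue the request for the time that is left.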
+ ctx, cancel := context.WithTimeout(ctx, r.config.CtxTimeOut) + defer cancel() + + ctxlog.Debug(ctx, "Start reconciling Buildrun-ttl", namespace, request.Namespace, name, request.Name) + + br := &buildv1alpha1.BuildRun{} + err := r.client.Get(ctx, types.NamespacedName{Name: request.Name, Namespace: request.Namespace}, br) + if err != nil { + if apierrors.IsNotFound(err) { + ctxlog.Debug(ctx, "Finish reconciling buildrun-ttl. Buildrun was not found", namespace, request.Namespace, name, request.Name) + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + condition := br.Status.GetCondition(buildv1alpha1.Succeeded) + if condition == nil || condition.Status == corev1.ConditionUnknown { + return reconcile.Result{}, nil + } + var ttl *metav1.Duration + if condition.Status == corev1.ConditionTrue { + if br.Spec.Retention != nil && br.Spec.Retention.TTLAfterSucceeded != nil { + ttl = br.Spec.Retention.TTLAfterSucceeded + } else if br.Status.BuildSpec != nil && br.Status.BuildSpec.Retention != nil && br.Status.BuildSpec.Retention.TTLAfterSucceeded != nil { + ttl = br.Status.BuildSpec.Retention.TTLAfterSucceeded + } + } else { + if br.Spec.Retention != nil && br.Spec.Retention.TTLAfterFailed != nil { + ttl = br.Spec.Retention.TTLAfterFailed + } else if br.Status.BuildSpec != nil && br.Status.BuildSpec.Retention != nil && br.Status.BuildSpec.Retention.TTLAfterFailed != nil { + ttl = br.Status.BuildSpec.Retention.TTLAfterFailed + } + } + + // check if BuildRun still has a TTL + if ttl == nil { + return reconcile.Result{}, nil + } + + if br.Status.CompletionTime.Add(ttl.Duration).Before(time.Now()) { + ctxlog.Info(ctx, "Deleting buildrun as ttl has been reached.", namespace, request.Namespace, name, request.Name) + err := r.client.Delete(ctx, br, &client.DeleteOptions{}) + if err != nil { + if !apierrors.IsNotFound(err) { + ctxlog.Debug(ctx, "Error deleting buildRun.", namespace, request.Namespace, name, br.Name, "error", err) + return reconcile.Result{}, err + } + ctxlog.Debug(ctx, "Error deleting buildRun. 
It has already been deleted.", namespace, request.Namespace, name, br.Name) + return reconcile.Result{}, nil + } + } else { + timeLeft := time.Until(br.Status.CompletionTime.Add(ttl.Duration)) + return reconcile.Result{Requeue: true, RequeueAfter: timeLeft}, nil + } + + ctxlog.Debug(ctx, "Finishing reconciling request from a BuildRun event", namespace, request.Namespace, name, request.Name) + return reconcile.Result{}, nil +} diff --git a/pkg/reconciler/buildrunttlcleanup/controller.go b/pkg/reconciler/buildrunttlcleanup/controller.go new file mode 100644 index 0000000000..46568672e8 --- /dev/null +++ b/pkg/reconciler/buildrunttlcleanup/controller.go @@ -0,0 +1,146 @@ +// Copyright The Shipwright Contributors +// +// SPDX-License-Identifier: Apache-2.0 + +package buildrunttlcleanup + +import ( + "context" + + buildv1alpha1 "github.com/shipwright-io/build/pkg/apis/build/v1alpha1" + "github.com/shipwright-io/build/pkg/config" + "github.com/shipwright-io/build/pkg/ctxlog" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +const ( + namespace string = "namespace" + name string = "name" +) + +// Add creates a new BuildRun_ttl_cleanup Controller and adds it to the Manager. The Manager will set fields on the Controller +// and Start it when the Manager is Started. +func Add(ctx context.Context, c *config.Config, mgr manager.Manager) error { + ctx = ctxlog.NewContext(ctx, "buildrun-ttl-cleanup-controller") + return add(ctx, mgr, NewReconciler(c, mgr), c.Controllers.BuildRun.MaxConcurrentReconciles) +} + +// reconcileCompletedBuildRun returns true if the object has the required TTL parameters +func reconcileCompletedBuildRun(condition *buildv1alpha1.Condition, o *buildv1alpha1.BuildRun) bool { + if condition.Status == corev1.ConditionTrue { + // check if a successful BuildRun has a TTL after succeeded value set + if o.Spec.Retention != nil && o.Spec.Retention.TTLAfterSucceeded != nil { + return true + } + + if o.Status.BuildSpec != nil && o.Status.BuildSpec.Retention != nil && o.Status.BuildSpec.Retention.TTLAfterSucceeded != nil { + return true + } + } else { + // check if a failed BuildRun has a TTL after failed + if o.Spec.Retention != nil && o.Spec.Retention.TTLAfterFailed != nil { + return true + } + + if o.Status.BuildSpec != nil && o.Status.BuildSpec.Retention != nil && o.Status.BuildSpec.Retention.TTLAfterFailed != nil { + return true + } + } + return false +} + +// reconcileAlreadyCompletedBuildRun returns true only if the TTL limit was introduced +// or if it was lowered as the object was completed before the update +func reconcileAlreadyCompletedBuildRun(newCondition *buildv1alpha1.Condition, oldCondition *buildv1alpha1.Condition, n *buildv1alpha1.BuildRun, o *buildv1alpha1.BuildRun) bool { + if newCondition.Status == corev1.ConditionTrue { + // check if a successful BuildRun has a TTL that was lowered or introduced + if (o.Spec.Retention == nil || o.Spec.Retention.TTLAfterSucceeded == nil) && n.Spec.Retention != nil && n.Spec.Retention.TTLAfterSucceeded != nil { + return true + } + + if o.Spec.Retention != nil && o.Spec.Retention.TTLAfterSucceeded != nil && n.Spec.Retention != nil && n.Spec.Retention.TTLAfterSucceeded != nil && n.Spec.Retention.TTLAfterSucceeded.Duration < 
o.Spec.Retention.TTLAfterSucceeded.Duration { + return true + } + } else { + // check if a failed BuildRun has a TTL that was lowered or introduced + if (o.Spec.Retention == nil || o.Spec.Retention.TTLAfterFailed == nil) && n.Spec.Retention != nil && n.Spec.Retention.TTLAfterFailed != nil { + return true + } + + if o.Spec.Retention != nil && o.Spec.Retention.TTLAfterFailed != nil && n.Spec.Retention != nil && n.Spec.Retention.TTLAfterFailed != nil && n.Spec.Retention.TTLAfterFailed.Duration < o.Spec.Retention.TTLAfterFailed.Duration { + return true + } + } + return false +} + +func add(ctx context.Context, mgr manager.Manager, r reconcile.Reconciler, maxConcurrentReconciles int) error { + // Create the controller options + options := controller.Options{ + Reconciler: r, + } + + if maxConcurrentReconciles > 0 { + options.MaxConcurrentReconciles = maxConcurrentReconciles + + } + + c, err := controller.New("buildrun-ttl-cleanup-controller", mgr, options) + if err != nil { + return err + } + + predBuildRun := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + // Reconcile if TTL values are set + o := e.Object.(*buildv1alpha1.BuildRun) + + // ignore a running BuildRun + condition := o.Status.GetCondition(buildv1alpha1.Succeeded) + if condition == nil || condition.Status == corev1.ConditionUnknown { + return false + } + + return reconcileCompletedBuildRun(condition, o) + }, + UpdateFunc: func(e event.UpdateEvent) bool { + // check if the updated object is completed + n := e.ObjectNew.(*buildv1alpha1.BuildRun) + newCondition := n.Status.GetCondition(buildv1alpha1.Succeeded) + if newCondition == nil || newCondition.Status == corev1.ConditionUnknown { + return false + } + + o := e.ObjectOld.(*buildv1alpha1.BuildRun) + oldCondition := o.Status.GetCondition(buildv1alpha1.Succeeded) + + // for objects that failed or just completed, check if a matching TTL is set + if oldCondition == nil || oldCondition.Status == corev1.ConditionUnknown { + return reconcileCompletedBuildRun(newCondition, n) + } + + // for objects that were already complete, check if the TTL was lowered or introduced + if oldCondition != nil && oldCondition.Status != corev1.ConditionUnknown { + return reconcileAlreadyCompletedBuildRun(newCondition, oldCondition, n, o) + } + + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + // Never reconcile on deletion, there is nothing we have to do + return false + }, + } + // Watch for changes to primary resource BuildRun + if err = c.Watch(&source.Kind{Type: &buildv1alpha1.BuildRun{}}, &handler.EnqueueRequestForObject{}, predBuildRun); err != nil { + return err + } + return nil +} diff --git a/test/build_samples.go b/test/build_samples.go index cce41843dd..120da8a822 100644 --- a/test/build_samples.go +++ b/test/build_samples.go @@ -506,3 +506,83 @@ spec: output: image: image-registry.openshift-image-registry.svc:5000/example/buildpacks-app ` + +// MinimalBuildWithRetentionTTLFive defines a simple +// Build with a source, a strategy and ttl +const MinimalBuildWithRetentionTTLFive = ` +apiVersion: shipwright.io/v1alpha1 +kind: Build +metadata: + name: build-retention-ttl +spec: + source: + url: "https://github.com/shipwright-io/sample-go" + contextDir: docker-build + strategy: + kind: ClusterBuildStrategy + output: + image: image-registry.openshift-image-registry.svc:5000/example/buildpacks-app + retention: + ttlAfterFailed: 5s + ttlAfterSucceeded: 5s +` + +// MinimalBuildWithRetentionLimitOne defines a simple +// Build with a source, a strategy and limits set as 1 
+const MinimalBuildWithRetentionLimitOne = `
+apiVersion: shipwright.io/v1alpha1
+kind: Build
+metadata:
+  name: build-retention-limit
+spec:
+  source:
+    url: "https://github.com/shipwright-io/sample-go"
+    contextDir: docker-build
+  strategy:
+    kind: ClusterBuildStrategy
+  output:
+    image: image-registry.openshift-image-registry.svc:5000/example/buildpacks-app
+  retention:
+    failedLimit: 1
+    succeededLimit: 1
+`
+
+// MinimalBuildWithRetentionLimitDiff defines a simple Build with a source,
+// a strategy and different failed and succeeded limits
+const MinimalBuildWithRetentionLimitDiff = `
+apiVersion: shipwright.io/v1alpha1
+kind: Build
+metadata:
+  name: build-retention-limit
+spec:
+  source:
+    url: "https://github.com/shipwright-io/sample-go"
+    contextDir: docker-build
+  strategy:
+    kind: ClusterBuildStrategy
+  output:
+    image: image-registry.openshift-image-registry.svc:5000/example/buildpacks-app
+  retention:
+    failedLimit: 1
+    succeededLimit: 2
+`
+
+// MinimalBuildWithRetentionTTLOneMin defines a simple
+// Build with a source, a strategy and a one-minute ttl
+const MinimalBuildWithRetentionTTLOneMin = `
+apiVersion: shipwright.io/v1alpha1
+kind: Build
+metadata:
+  name: build-retention-ttl
+spec:
+  source:
+    url: "https://github.com/shipwright-io/sample-go"
+    contextDir: docker-build
+  strategy:
+    kind: ClusterBuildStrategy
+  output:
+    image: image-registry.openshift-image-registry.svc:5000/example/buildpacks-app
+  retention:
+    ttlAfterFailed: 1m
+    ttlAfterSucceeded: 1m
+`
diff --git a/test/buildrun_samples.go b/test/buildrun_samples.go
index 8b9945527a..f583f36919 100644
--- a/test/buildrun_samples.go
+++ b/test/buildrun_samples.go
@@ -200,3 +200,45 @@ spec:
   buildRef:
     name: foobar
 `
+
+// MinimalBuildRunRetention defines a minimal BuildRun
+// with a reference used to test retention fields
+const MinimalBuildRunRetention = `
+apiVersion: shipwright.io/v1alpha1
+kind: BuildRun
+metadata:
+  name: buildrun-retention-ttl
+spec:
+  buildRef:
+    name: build-retention-ttl
+`
+
+// MinimalBuildRunRetentionTTLFive defines a minimal BuildRun
+// with a five-second TTL used to test retention fields
+const MinimalBuildRunRetentionTTLFive = `
+apiVersion: shipwright.io/v1alpha1
+kind: BuildRun
+metadata:
+  name: buildrun-retention-ttl
+spec:
+  buildRef:
+    name: build-retention-ttl
+  retention:
+    ttlAfterFailed: 5s
+    ttlAfterSucceeded: 5s
+`
+
+// MinimalBuildahBuildRunWithExitCode defines a BuildRun
+// that overrides the strategy's exit-command parameter
+const MinimalBuildahBuildRunWithExitCode = `
+apiVersion: shipwright.io/v1alpha1
+kind: BuildRun
+metadata:
+  name: buildah-run
+spec:
+  paramValues:
+    - name: exit-command
+      value: "true"
+  buildRef:
+    name: buildah
+`
diff --git a/test/clusterbuildstrategy_samples.go b/test/clusterbuildstrategy_samples.go
index 16e0837e7c..77d23d7d86 100644
--- a/test/clusterbuildstrategy_samples.go
+++ b/test/clusterbuildstrategy_samples.go
@@ -217,6 +217,10 @@ kind: ClusterBuildStrategy
 metadata:
   name: noop
 spec:
+  parameters:
+    - name: exit-command
+      description: "Exit command for the pod"
+      default: "true"
   buildSteps:
     - name: step-no-and-op
       image: alpine:latest
@@ -240,7 +244,7 @@ spec:
         - name: AWS_SECRET_KEY
           value: NOT_SET
       command:
-        - "true"
+        - $(params.exit-command)
       resources:
         limits:
           cpu: 250m
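Reviewer aside, not part of the diff: the noop strategy's new `exit-command` parameter defaults to `"true"`, so existing tests keep succeeding, and the retention tests below flip it to `"false"` to force a failing BuildRun. Condensed here from the test code that follows; `buildRunObject` is the fixture defined in those tests.

```go
exitCommand := "false"
failParam := v1alpha1.ParamValue{
	Name:        "exit-command",
	SingleValue: &v1alpha1.SingleValue{Value: &exitCommand},
}
// The noop step now runs `false` instead of `true`, exits non-zero,
// and the BuildRun is marked as failed.
buildRunObject.Spec.ParamValues = []v1alpha1.ParamValue{failParam}
```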
"github.com/onsi/gomega" + "github.com/shipwright-io/build/pkg/apis/build/v1alpha1" + "github.com/shipwright-io/build/test" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" +) + +var _ = Describe("Integration tests for retention limits and ttls for succeeded buildruns.", func() { + var ( + cbsObject *v1alpha1.ClusterBuildStrategy + buildObject *v1alpha1.Build + buildRunObject *v1alpha1.BuildRun + buildSample []byte + buildRunSample []byte + ) + + // Load the ClusterBuildStrategies before each test case + BeforeEach(func() { + cbsObject, err = tb.Catalog.LoadCBSWithName(STRATEGY+tb.Namespace, []byte(test.ClusterBuildStrategyNoOp)) + Expect(err).To(BeNil()) + + err = tb.CreateClusterBuildStrategy(cbsObject) + Expect(err).To(BeNil()) + }) + + AfterEach(func() { + _, err = tb.GetBuild(buildObject.Name) + if err == nil { + Expect(tb.DeleteBuild(buildObject.Name)).To(BeNil()) + } + + err := tb.DeleteClusterBuildStrategy(cbsObject.Name) + Expect(err).To(BeNil()) + }) + + JustBeforeEach(func() { + if buildSample != nil { + buildObject, err = tb.Catalog.LoadBuildWithNameAndStrategy(BUILD+tb.Namespace, STRATEGY+tb.Namespace, buildSample) + Expect(err).To(BeNil()) + } + + if buildRunSample != nil { + buildRunObject, err = tb.Catalog.LoadBRWithNameAndRef(BUILDRUN+tb.Namespace, BUILD+tb.Namespace, buildRunSample) + Expect(err).To(BeNil()) + } + }) + + Context("When a buildrun related to a build with short ttl set succeeds", func() { + + BeforeEach(func() { + buildSample = []byte(test.MinimalBuildWithRetentionTTLFive) + buildRunSample = []byte(test.MinimalBuildRunRetention) + }) + + It("Should not find the buildrun after few seconds after it succeeds", func() { + + Expect(tb.CreateBuild(buildObject)).To(BeNil()) + + buildObject, err = tb.GetBuildTillValidation(buildObject.Name) + Expect(err).To(BeNil()) + + Expect(tb.CreateBR(buildRunObject)).To(BeNil()) + + br, err := tb.GetBRTillCompletion(buildRunObject.Name) + Expect(err).To(BeNil()) + Expect(br.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionTrue)) + _, err = tb.GetBRTillNotFound(buildRunObject.Name, time.Second*1, time.Second*15) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + }) + + Context("When a buildrun with short ttl set succeeds. TTL field exists in the buildrun spec", func() { + + BeforeEach(func() { + buildSample = []byte(test.MinimalBuildWithRetentionLimitOne) + buildRunSample = []byte(test.MinimalBuildRunRetentionTTLFive) + }) + + It("Should not find the buildrun few seconds after it succeeds", func() { + + Expect(tb.CreateBuild(buildObject)).To(BeNil()) + + buildObject, err = tb.GetBuildTillValidation(buildObject.Name) + Expect(err).To(BeNil()) + + Expect(tb.CreateBR(buildRunObject)).To(BeNil()) + br, err := tb.GetBRTillCompletion(buildRunObject.Name) + Expect(err).To(BeNil()) + Expect(br.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionTrue)) + _, err = tb.GetBRTillNotFound(buildRunObject.Name, time.Second*1, time.Second*15) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + }) + + Context("When a buildrun with short ttl set fails because buildref not found. 
TTL field exists in the buildrun spec", func() { + + BeforeEach(func() { + buildSample = []byte(test.MinimalBuildWithRetentionLimitOne) + buildRunSample = []byte(test.MinimalBuildRunRetentionTTLFive) + }) + + It("Should not find the buildrun few seconds after it fails", func() { + + buildRunObject.Spec.BuildRef.Name = "non-existent-buildref" + Expect(tb.CreateBR(buildRunObject)).To(BeNil()) + br, err := tb.GetBRTillCompletion(buildRunObject.Name) + Expect(err).To(BeNil()) + Expect(br.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionFalse)) + _, err = tb.GetBRTillNotFound(buildRunObject.Name, time.Second*1, time.Second*15) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + }) + + Context("When we lower a successfully completed buildrun's ttl retention parameters. Original param - 5m, Updated param - 5s", func() { + + BeforeEach(func() { + buildSample = []byte(test.MinimalBuildWithRetentionLimitOne) + buildRunSample = []byte(test.MinimalBuildRunRetentionTTLFive) + }) + + It("Should not find the buildrun few seconds after it succeeds", func() { + + Expect(tb.CreateBuild(buildObject)).To(BeNil()) + + buildObject, err = tb.GetBuildTillValidation(buildObject.Name) + Expect(err).To(BeNil()) + + // Make the TTL 5 minutes + buildRunObject.Spec.Retention.TTLAfterSucceeded.Duration = time.Minute * 5 + Expect(tb.CreateBR(buildRunObject)).To(BeNil()) + br, err := tb.GetBRTillCompletion(buildRunObject.Name) + Expect(err).To(BeNil()) + Expect(br.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionTrue)) + + // Make the TTL 5 seconds + br.Spec.Retention.TTLAfterSucceeded.Duration = time.Second * 5 + Expect(tb.UpdateBR(br)).To(BeNil()) + _, err = tb.GetBRTillNotFound(buildRunObject.Name, time.Second*1, time.Second*15) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + }) + + Context("Multiple successful buildruns related to build with limit 1", func() { + + BeforeEach(func() { + buildSample = []byte(test.MinimalBuildWithRetentionLimitOne) + buildRunSample = []byte(test.MinimalBuildRunRetention) + }) + + It("Should not find the older successful buildrun.", func() { + + Expect(tb.CreateBuild(buildObject)).To(BeNil()) + + buildObject, err = tb.GetBuildTillValidation(buildObject.Name) + Expect(err).To(BeNil()) + + // Create first buildrun + buildRunObject.Name = BUILDRUN + tb.Namespace + "-1" + Expect(tb.CreateBR(buildRunObject)).To(BeNil()) + br1, err := tb.GetBRTillCompletion(buildRunObject.Name) + Expect(err).To(BeNil()) + Expect(br1.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionTrue)) + // Create second buildrun + buildRunObject.Name = BUILDRUN + tb.Namespace + "-2" + Expect(tb.CreateBR(buildRunObject)).To(BeNil()) + br2, err := tb.GetBRTillCompletion(buildRunObject.Name) + Expect(err).To(BeNil()) + Expect(br2.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionTrue)) + + _, err = tb.GetBRTillNotFound(BUILDRUN+tb.Namespace+"-1", time.Second*1, time.Second*5) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + _, err = tb.GetBRTillCompletion(BUILDRUN + tb.Namespace + "-2") + Expect(err).To(BeNil()) + }) + + }) + + Context("When a buildrun that has TTL defined in its spec and in the corresponding build's spec succeeds", func() { + + BeforeEach(func() { + buildSample = []byte(test.MinimalBuildWithRetentionTTLOneMin) + buildRunSample = []byte(test.MinimalBuildRunRetentionTTLFive) + }) + + It("Should honor the TTL defined in the buildrun.", func() { + Expect(tb.CreateBuild(buildObject)).To(BeNil()) + + 
buildObject, err = tb.GetBuildTillValidation(buildObject.Name) + Expect(err).To(BeNil()) + Expect(tb.CreateBR(buildRunObject)).To(BeNil()) + + br, err := tb.GetBRTillCompletion(buildRunObject.Name) + Expect(err).To(BeNil()) + Expect(br.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionTrue)) + _, err = tb.GetBRTillNotFound(buildRunObject.Name, time.Second*1, time.Second*15) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + + }) + }) + + Context("Multiple buildruns with different build limits for failure and success", func() { + + BeforeEach(func() { + Expect(err).To(BeNil()) + buildSample = []byte(test.MinimalBuildWithRetentionLimitDiff) + buildRunSample = []byte(test.MinimalBuildahBuildRunWithExitCode) + }) + // Case with failedLimit 1 and succeededLimit 2. + // It ensures that only relevant buildruns are affected by retention parameters + It("Should not find the old failed buildrun if the limit has been triggered", func() { + + // Create build + Expect(tb.CreateBuild(buildObject)).To(BeNil()) + buildObject, err = tb.GetBuildTillValidation(buildObject.Name) + Expect(err).To(BeNil()) + + // Create 2 successful buildruns + buildRunObject.Name = BUILDRUN + tb.Namespace + "-success-1" + err = tb.CreateBR(buildRunObject) + + buildRunObject.Name = BUILDRUN + tb.Namespace + "-success-2" + err = tb.CreateBR(buildRunObject) + + // Create 1 failed buildrun + buildRunObject.Name = BUILDRUN + tb.Namespace + "-fail-1" + str := "false" + falseParam := v1alpha1.ParamValue{Name: "exit-command", SingleValue: &v1alpha1.SingleValue{Value: &str}} + buildRunObject.Spec.ParamValues = []v1alpha1.ParamValue{falseParam} + err = tb.CreateBR(buildRunObject) + + // Wait for buildrun completion + br1, err := tb.GetBRTillCompletion(BUILDRUN + tb.Namespace + "-success-1") + Expect(err).To(BeNil()) + Expect(br1.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionTrue)) + + br2, err := tb.GetBRTillCompletion(BUILDRUN + tb.Namespace + "-success-2") + Expect(err).To(BeNil()) + Expect(br2.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionTrue)) + + br3, err := tb.GetBRTillCompletion(BUILDRUN + tb.Namespace + "-fail-1") + Expect(err).To(BeNil()) + Expect(br3.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionFalse)) + + // Create 1 failed buildrun. 
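+			// MinimalBuildWithRetentionLimitDiff sets failedLimit: 1 and
+			// succeededLimit: 2, so this second failure should push the failed
+			// count over its limit and delete the oldest failed BuildRun, while
+			// both succeeded BuildRuns remain within their limit.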
+ buildRunObject.Name = BUILDRUN + tb.Namespace + "-fail-2" + buildRunObject.Spec.ParamValues = []v1alpha1.ParamValue{falseParam} + err = tb.CreateBR(buildRunObject) + Expect(err).To(BeNil()) + br4, err := tb.GetBRTillCompletion(buildRunObject.Name) + Expect(err).To(BeNil()) + Expect(br4.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionFalse)) + + // Check that the older failed buildrun has been deleted while the successful buildruns exist + _, err = tb.GetBRTillNotFound(BUILDRUN+tb.Namespace+"-fail-1", time.Second*1, time.Second*5) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + _, err = tb.GetBRTillCompletion(BUILDRUN + tb.Namespace + "-success-1") + Expect(err).To(BeNil()) + _, err = tb.GetBRTillCompletion(BUILDRUN + tb.Namespace + "-success-2") + Expect(err).To(BeNil()) + }) + }) + +}) + +var _ = Describe("Integration tests for retention limits and ttls of buildRuns that fail", func() { + var ( + cbsObject *v1alpha1.ClusterBuildStrategy + buildObject *v1alpha1.Build + buildRunObject *v1alpha1.BuildRun + buildSample []byte + buildRunSample []byte + ) + // Load the ClusterBuildStrategies before each test case + BeforeEach(func() { + cbsObject, err = tb.Catalog.LoadCBSWithName(STRATEGY+tb.Namespace, []byte(test.ClusterBuildStrategyNoOp)) + Expect(err).To(BeNil()) + + err = tb.CreateClusterBuildStrategy(cbsObject) + Expect(err).To(BeNil()) + }) + AfterEach(func() { + + _, err = tb.GetBuild(buildObject.Name) + if err == nil { + Expect(tb.DeleteBuild(buildObject.Name)).To(BeNil()) + } + + err := tb.DeleteClusterBuildStrategy(cbsObject.Name) + Expect(err).To(BeNil()) + }) + + JustBeforeEach(func() { + if buildSample != nil { + buildObject, err = tb.Catalog.LoadBuildWithNameAndStrategy(BUILD+tb.Namespace, STRATEGY+tb.Namespace, buildSample) + Expect(err).To(BeNil()) + } + + if buildRunSample != nil { + buildRunObject, err = tb.Catalog.LoadBRWithNameAndRef(BUILDRUN+tb.Namespace, BUILD+tb.Namespace, buildRunSample) + Expect(err).To(BeNil()) + str := "false" + falseParam := v1alpha1.ParamValue{Name: "exit-command", SingleValue: &v1alpha1.SingleValue{Value: &str}} + buildRunObject.Spec.ParamValues = []v1alpha1.ParamValue{falseParam} + } + }) + + Context("When a buildrun related to a build with short ttl set fails", func() { + + BeforeEach(func() { + buildSample = []byte(test.MinimalBuildWithRetentionTTLFive) + buildRunSample = []byte(test.MinimalBuildRunRetention) + }) + + It("Should not find the buildrun few seconds after it fails", func() { + + Expect(tb.CreateBuild(buildObject)).To(BeNil()) + + buildObject, err = tb.GetBuildTillValidation(buildObject.Name) + Expect(err).To(BeNil()) + + Expect(tb.CreateBR(buildRunObject)).To(BeNil()) + + br, err := tb.GetBRTillCompletion(buildRunObject.Name) + Expect(err).To(BeNil()) + Expect(br.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionFalse)) + _, err = tb.GetBRTillNotFound(buildRunObject.Name, time.Second*1, time.Second*15) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + }) + + Context("When we lower a failed completed buildrun's ttl retention parameters. 
Original param - 5m, Updated param - 5s", func() { + + BeforeEach(func() { + buildSample = []byte(test.MinimalBuildWithRetentionLimitOne) + buildRunSample = []byte(test.MinimalBuildRunRetentionTTLFive) + }) + + It("Should not find the buildrun few seconds after it succeeds", func() { + + Expect(tb.CreateBuild(buildObject)).To(BeNil()) + + buildObject, err = tb.GetBuildTillValidation(buildObject.Name) + Expect(err).To(BeNil()) + + // Make the TTL 5 minutes + buildRunObject.Spec.Retention.TTLAfterFailed.Duration = time.Minute * 5 + Expect(tb.CreateBR(buildRunObject)).To(BeNil()) + br, err := tb.GetBRTillCompletion(buildRunObject.Name) + Expect(err).To(BeNil()) + Expect(br.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionFalse)) + + // Make the TTL 5 seconds + br.Spec.Retention.TTLAfterFailed.Duration = time.Second * 5 + Expect(tb.UpdateBR(br)).To(BeNil()) + _, err = tb.GetBRTillNotFound(buildRunObject.Name, time.Second*1, time.Second*15) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + }) + + Context("When a buildrun with short ttl set fails. TTL is defined in the buildrun specs", func() { + + BeforeEach(func() { + buildSample = []byte(test.MinimalBuildWithRetentionLimitOne) + buildRunSample = []byte(test.MinimalBuildRunRetentionTTLFive) + }) + + It("Should not find the buildrun few seconds after it fails", func() { + + Expect(tb.CreateBuild(buildObject)).To(BeNil()) + + buildObject, err = tb.GetBuildTillValidation(buildObject.Name) + Expect(err).To(BeNil()) + Expect(tb.CreateBR(buildRunObject)).To(BeNil()) + + br, err := tb.GetBRTillCompletion(buildRunObject.Name) + Expect(err).To(BeNil()) + Expect(br.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionFalse)) + _, err = tb.GetBRTillNotFound(buildRunObject.Name, time.Second*1, time.Second*15) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + }) + + Context("When a buildrun that has TTL defined in its spec and on the corresponding build's spec fails", func() { + + BeforeEach(func() { + buildSample = []byte(test.MinimalBuildWithRetentionTTLOneMin) + buildRunSample = []byte(test.MinimalBuildRunRetentionTTLFive) + }) + + It("Should honor the TTL defined in the buildrun.", func() { + Expect(tb.CreateBuild(buildObject)).To(BeNil()) + + buildObject, err = tb.GetBuildTillValidation(buildObject.Name) + Expect(err).To(BeNil()) + Expect(tb.CreateBR(buildRunObject)).To(BeNil()) + + br, err := tb.GetBRTillCompletion(buildRunObject.Name) + Expect(err).To(BeNil()) + Expect(br.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionFalse)) + _, err = tb.GetBRTillNotFound(buildRunObject.Name, time.Second*1, time.Second*15) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + + }) + }) + + Context("Multiple failed buildruns related to build with limit 1", func() { + + BeforeEach(func() { + buildSample = []byte(test.MinimalBuildWithRetentionLimitOne) + buildRunSample = []byte(test.MinimalBuildRunRetention) + }) + + It("Should not find the older failed buildrun", func() { + Expect(tb.CreateBuild(buildObject)).To(BeNil()) + + buildObject, err = tb.GetBuildTillValidation(buildObject.Name) + Expect(err).To(BeNil()) + + // Create first buildrun + buildRunObject.Name = BUILDRUN + tb.Namespace + "-1" + Expect(tb.CreateBR(buildRunObject)).To(BeNil()) + br1, err := tb.GetBRTillCompletion(buildRunObject.Name) + Expect(err).To(BeNil()) + Expect(br1.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionFalse)) + // Create second buildrun + buildRunObject.Name = BUILDRUN + 
tb.Namespace + "-2" + Expect(tb.CreateBR(buildRunObject)).To(BeNil()) + br2, err := tb.GetBRTillCompletion(buildRunObject.Name) + Expect(err).To(BeNil()) + Expect(br2.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionFalse)) + + _, err = tb.GetBRTillNotFound(BUILDRUN+tb.Namespace+"-1", time.Second*1, time.Second*5) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + _, err = tb.GetBRTillCompletion(BUILDRUN + tb.Namespace + "-2") + Expect(err).To(BeNil()) + }) + + }) +}) diff --git a/test/utils/buildruns.go b/test/utils/buildruns.go index c25a12b4c0..afa3395c2e 100644 --- a/test/utils/buildruns.go +++ b/test/utils/buildruns.go @@ -9,6 +9,7 @@ import ( "encoding/json" "errors" "fmt" + "time" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -108,6 +109,31 @@ func (t *TestBuild) GetBRTillCompletion(name string) (*v1alpha1.BuildRun, error) return brInterface.Get(context.TODO(), name, metav1.GetOptions{}) } +// GetBRTillNotFound waits for the buildrun to get deleted. It returns an error if BuildRun is not found +func (t *TestBuild) GetBRTillNotFound(name string, interval time.Duration, timeout time.Duration) (*v1alpha1.BuildRun, error) { + + var ( + GetBRTillNotFound = func() (bool, error) { + + bInterface := t.BuildClientSet.ShipwrightV1alpha1().BuildRuns(t.Namespace) + _, err := bInterface.Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil && apierrors.IsNotFound(err) { + return true, err + } + return false, nil + } + ) + + brInterface := t.BuildClientSet.ShipwrightV1alpha1().BuildRuns(t.Namespace) + + err := wait.PollImmediate(interval, timeout, GetBRTillNotFound) + if err != nil { + return nil, err + } + + return brInterface.Get(context.TODO(), name, metav1.GetOptions{}) +} + // GetBRTillNotOwner returns a BuildRun that has not an owner. // If the timeout is reached or it fails when retrieving the BuildRun it will // stop polling and return