From b77036f85ab465a5b7bafd863f8325b127d9b4db Mon Sep 17 00:00:00 2001 From: Raghav Bhatnagar Date: Tue, 29 Mar 2022 13:40:24 +0530 Subject: [PATCH 1/2] Added automatic buildrun cleanup functionality to improve buildrun management. Two controllers have been added - buildrun_ttl_cleanup controller and build_limit_cleanup controller. - buildrun_ttl_cleanup controller watches buildruns that are created or updated. - build_limit_cleanup controller watches builds that are created or updated and watches buildruns that are updated. Build specifications were extended by adding a retention parameter that has 4 optional fields - ttlAfterFailed: buildrun is deleted if the mentioned duration of time has passed and the buildrun has failed. - ttlAfterSucceeded: buildrun is deleted if the mentioned duration of time has passed and the buildrun has succeeded. - failedLimit: If this limit is exceeded, the oldest failed buildruns are deleted till the limit is satisfied. - succeededLimit: If this limit is exceeded, the oldest successful buildruns are deleted till the limit is satisfied. Tests have been added to ensure that these controllers are working as intended. Signed-off-by: Pravallika Dindukurthi --- deploy/crds/shipwright.io_buildruns.yaml | 24 ++ deploy/crds/shipwright.io_builds.yaml | 24 ++ docs/build.md | 56 +++- docs/buildrun.md | 21 ++ pkg/apis/build/v1alpha1/build_types.go | 29 ++ .../build/v1alpha1/zz_generated.deepcopy.go | 41 +++ pkg/controller/controller.go | 10 + .../build_limit_cleanup.go | 134 ++++++++ .../build_limit_cleanup/controller.go | 125 ++++++++ .../buildrun_ttl_cleanup.go | 117 +++++++ .../buildrun_ttl_cleanup/controller.go | 86 ++++++ test/build_samples.go | 60 ++++ test/buildrun_samples.go | 12 + test/clusterbuildstrategy_samples.go | 32 ++ test/integration/buildrun_cleanup_test.go | 292 ++++++++++++++++++ test/utils/buildruns.go | 26 ++ 16 files changed, 1088 insertions(+), 1 deletion(-) create mode 100644 pkg/reconciler/build_limit_cleanup/build_limit_cleanup.go create mode 100644 pkg/reconciler/build_limit_cleanup/controller.go create mode 100644 pkg/reconciler/buildrun_ttl_cleanup/buildrun_ttl_cleanup.go create mode 100644 pkg/reconciler/buildrun_ttl_cleanup/controller.go create mode 100644 test/integration/buildrun_cleanup_test.go diff --git a/deploy/crds/shipwright.io_buildruns.yaml b/deploy/crds/shipwright.io_buildruns.yaml index 5d6b1cf33c..d7ac557282 100644 --- a/deploy/crds/shipwright.io_buildruns.yaml +++ b/deploy/crds/shipwright.io_buildruns.yaml @@ -352,6 +352,30 @@ spec: - name type: object type: array + retention: + description: Contains information about retention params + properties: + failedLimit: + description: FailedLimit defines the maximum number of failed + buildruns that should exist. + minimum: 1 + type: integer + succeededLimit: + description: SucceededLimit defines the maximum number of + succeeded buildruns that should exist. + minimum: 1 + type: integer + ttlAfterFailed: + description: TtlAfterFailed defines the maximum duration of + time the failed buildrun should exist. + format: duration + type: string + ttlAfterSucceeded: + description: TtlAfterFailed defines the maximum duration of + time the succeeded buildrun should exist. + format: duration + type: string + type: object source: description: Source refers to the Git repository containing the source code to be built. 
diff --git a/deploy/crds/shipwright.io_builds.yaml b/deploy/crds/shipwright.io_builds.yaml
index 508b7bc1c8..f28ac4ac02 100644
--- a/deploy/crds/shipwright.io_builds.yaml
+++ b/deploy/crds/shipwright.io_builds.yaml
@@ -331,6 +331,30 @@ spec:
                 - name
                 type: object
               type: array
+            retention:
+              description: Contains information about retention params
+              properties:
+                failedLimit:
+                  description: FailedLimit defines the maximum number of failed
+                    buildruns that should exist.
+                  minimum: 1
+                  type: integer
+                succeededLimit:
+                  description: SucceededLimit defines the maximum number of succeeded
+                    buildruns that should exist.
+                  minimum: 1
+                  type: integer
+                ttlAfterFailed:
+                  description: TtlAfterFailed defines the maximum duration of time
+                    the failed buildrun should exist.
+                  format: duration
+                  type: string
+                ttlAfterSucceeded:
+                  description: TtlAfterFailed defines the maximum duration of time
+                    the succeeded buildrun should exist.
+                  format: duration
+                  type: string
+              type: object
             source:
               description: Source refers to the Git repository containing the source
                 code to be built.
diff --git a/docs/build.md b/docs/build.md
index e834a36808..2e6706b5c0 100644
--- a/docs/build.md
+++ b/docs/build.md
@@ -15,6 +15,7 @@ SPDX-License-Identifier: Apache-2.0
   - [Defining ParamValues](#defining-paramvalues)
   - [Defining the Builder or Dockerfile](#defining-the-builder-or-dockerfile)
   - [Defining the Output](#defining-the-output)
+  - [Defining Retention Parameters](#defining-retention-parameters)
 - [BuildRun deletion](#BuildRun-deletion)
 
 ## Overview
@@ -29,6 +30,7 @@ A `Build` resource allows the user to define:
 - dockerfile
 - output
 - env
+- retention
 
 A `Build` is available within a namespace.
 
@@ -87,7 +89,11 @@ The `Build` definition supports the following fields:
   - `metadata.annotations[build.shipwright.io/build-run-deletion]` - Defines if delete all related BuildRuns when deleting the Build. The default is `false`.
   - `spec.output.annotations` - Refers to a list of `key/value` that could be used to [annotate](https://github.com/opencontainers/image-spec/blob/main/annotations.md) the output image.
   - `spec.output.labels` - Refers to a list of `key/value` that could be used to label the output image.
-  - `spec.env` - Specifies additional environment variables that should be passed to the build container. The available variables depend on the tool used by the chosen build strategy.
+  - `spec.env` - Specifies additional environment variables that should be passed to the build container. The available variables depend on the tool that is being used by the chosen build strategy.
+  - `spec.retention.ttlAfterFailed` - Specifies the duration for which a failed buildrun can exist.
+  - `spec.retention.ttlAfterSucceeded` - Specifies the duration for which a successful buildrun can exist.
+  - `spec.retention.failedLimit` - Specifies the number of failed buildruns that can exist.
+  - `spec.retention.succeededLimit` - Specifies the number of successful buildruns that can exist.
 
 ### Defining the Source
 
@@ -530,6 +536,54 @@ You can verify which labels were added to the output image that is available on
 docker inspect us.icr.io/source-to-image-build/nodejs-ex | jq ".[].Config.Labels"
 ```
 
+### Defining Retention Parameters
+
+A `Build` resource can specify how long completed BuildRuns can exist and how many failed or succeeded BuildRuns should be retained. Retention parameters provide an alternative to cleaning up old BuildRuns manually and delete them automatically.
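+Both TTL fields accept duration strings as values (for example, `30m` or `1h20m`), while both limit fields take positive integers (for example, `10`).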
+
+As part of the retention parameters, we have the following fields:
+
+- `retention.succeededLimit` - Defines the number of succeeded BuildRuns for a Build that can exist.
+- `retention.failedLimit` - Defines the number of failed BuildRuns for a Build that can exist.
+- `retention.ttlAfterFailed` - Specifies the duration for which a failed buildrun can exist.
+- `retention.ttlAfterSucceeded` - Specifies the duration for which a successful buildrun can exist.
+
+An example of a user using both TTL and Limit retention fields. In case of such a configuration, the BuildRun will be deleted once the first criterion is met.
+
+```yaml
+  apiVersion: shipwright.io/v1alpha1
+  kind: Build
+  metadata:
+    name: build-retention-ttl
+  spec:
+    source:
+      url: "https://github.com/shipwright-io/sample-go"
+      contextDir: docker-build
+    strategy:
+      kind: ClusterBuildStrategy
+    output:
+    ...
+    retention:
+      ttlAfterFailed: 30m
+      ttlAfterSucceeded: 1h
+      failedLimit: 10
+      succeededLimit: 20
+```
+
+**NOTE**: When changes are made to `retention.failedLimit` and `retention.succeededLimit` values, they come into effect as soon as the build is applied, thereby enforcing the new limits. On the other hand, changing the `retention.ttlAfterFailed` and `retention.ttlAfterSucceeded` values will only affect new buildruns. Old buildruns will adhere to the old TTL retention values.
+
+#### Build_limit_cleanup controller
+
+Builds are watched by the build_limit_cleanup controller for the following preconditions:
+
+- Update on `Build` resource if either `retention.failedLimit` or `retention.succeeded` are set
+- Create on `Build` resource if either `retention.failedLimit` or `retention.succeeded` are set
+
+build_limit_cleanup also watches buildruns for the following precondition:
+
+- Update on `BuildRun` resource if either `retention.failedLimit` or `retention.succeeded` are set and the Buildrun has just completed execution.
+
+If these conditions are met, the reconciler calculates the number of buildruns that exists and deletes the oldest ones till the limit is satisfied.
+
 ### Sources
 
 Sources represent remote artifacts, as in external entities added to the build context before the actual Build starts. Therefore, you may employ `.spec.sources` to download artifacts from external repositories.
diff --git a/docs/buildrun.md b/docs/buildrun.md
index c1b941b274..906300e083 100644
--- a/docs/buildrun.md
+++ b/docs/buildrun.md
@@ -14,6 +14,7 @@ SPDX-License-Identifier: Apache-2.0
   - [Defining ParamValues](#defining-paramvalues)
   - [Defining the ServiceAccount](#defining-the-serviceaccount)
 - [Canceling a `BuildRun`](#canceling-a-buildrun)
+- [Automatic `BuildRun` deletion](#automatic-buildrun-deletion)
 - [Specifying Environment Variables](#specifying-environment-variables)
 - [BuildRun Status](#buildrun-status)
   - [Understanding the state of a BuildRun](#understanding-the-state-of-a-buildrun)
@@ -184,6 +185,26 @@ spec:
   state: "BuildRunCanceled"
 ```
 
+## Automatic `BuildRun` deletion
+
+We have two controllers that ensure that buildruns can be deleted automatically if required. This is ensured by adding `retention` parameters in build specifications.
+
+- TTL parameters: These are used by the buildrun_ttl_cleanup controller.
+  - `build.spec.retention.ttlAfterFailed`: The buildrun is deleted if the mentioned duration of time has passed and the buildrun has failed.
+  - `build.spec.retention.ttlAfterSucceeded`: The buildrun is deleted if the mentioned duration of time has passed and the buildrun has succeeded.
+- Limit parameters: These are used by the build_limit_cleanup controller. + - `build.spec.retention.failedLimit`: This parameter ensures that the number of failed buildruns do not exceed this limit. If the limit is exceeded, the oldest failed buildruns are deleted till the limit is satisfied. + - `build.spec.retention.succeededLimit`: This parameter ensures that the number of successful buildruns do not exceed this limit. If the limit is exceeded, the oldest successful buildruns are deleted till the limit is satisfied. + +### Buildrun_ttl_cleanup controller + +Buildruns are watched by the buildrun_ttl_cleanup controller for the following preconditions: + +- Update on `Buildrun` resource if either `ttlAfterSucceeded` or `ttlAfterFailed` are set. +- Create on `Buildrun` resource if either `ttlAfterSucceeded` or `ttlAfterFailed` are set. + +If these conditions are met, the reconciler checks if the ttl retention field is satisfied or not. If it is not, the buildrun is requeued after calculating the remaining time the buildrun is allowed to exist. + ## Specifying Environment Variables An example of a `BuildRun` that specifies environment variables: diff --git a/pkg/apis/build/v1alpha1/build_types.go b/pkg/apis/build/v1alpha1/build_types.go index 7e1882268d..ba76653898 100644 --- a/pkg/apis/build/v1alpha1/build_types.go +++ b/pkg/apis/build/v1alpha1/build_types.go @@ -138,6 +138,11 @@ type BuildSpec struct { // Env contains additional environment variables that should be passed to the build container // +optional Env []corev1.EnvVar `json:"env,omitempty"` + + // Contains information about retention params + // + // +optional + Retention *BuildRetention `json:"retention,omitempty"` } // StrategyName returns the name of the configured strategy, or 'undefined' in @@ -215,6 +220,30 @@ type BuildList struct { Items []Build `json:"items"` } +// Retention struct for buildrun cleanup +type BuildRetention struct { + // FailedLimit defines the maximum number of failed buildruns that should exist. + // + // +optional + // +kubebuilder:validation:Minimum=1 + FailedLimit *uint `json:"failedLimit,omitempty"` + // SucceededLimit defines the maximum number of succeeded buildruns that should exist. + // + // +optional + // +kubebuilder:validation:Minimum=1 + SucceededLimit *uint `json:"succeededLimit,omitempty"` + // TtlAfterFailed defines the maximum duration of time the failed buildrun should exist. + // + // +optional + // +kubebuilder:validation:Format=duration + TtlAfterFailed *metav1.Duration `json:"ttlAfterFailed,omitempty"` + // TtlAfterFailed defines the maximum duration of time the succeeded buildrun should exist. + // + // +optional + // +kubebuilder:validation:Format=duration + TtlAfterSucceeded *metav1.Duration `json:"ttlAfterSucceeded,omitempty"` +} + func init() { SchemeBuilder.Register(&Build{}, &BuildList{}) } diff --git a/pkg/apis/build/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/build/v1alpha1/zz_generated.deepcopy.go index dab8d4977f..98e5e3254c 100644 --- a/pkg/apis/build/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/build/v1alpha1/zz_generated.deepcopy.go @@ -97,6 +97,42 @@ func (in *BuildRef) DeepCopy() *BuildRef { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BuildRetention) DeepCopyInto(out *BuildRetention) { + *out = *in + if in.FailedLimit != nil { + in, out := &in.FailedLimit, &out.FailedLimit + *out = new(uint) + **out = **in + } + if in.SucceededLimit != nil { + in, out := &in.SucceededLimit, &out.SucceededLimit + *out = new(uint) + **out = **in + } + if in.TtlAfterFailed != nil { + in, out := &in.TtlAfterFailed, &out.TtlAfterFailed + *out = new(v1.Duration) + **out = **in + } + if in.TtlAfterSucceeded != nil { + in, out := &in.TtlAfterSucceeded, &out.TtlAfterSucceeded + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildRetention. +func (in *BuildRetention) DeepCopy() *BuildRetention { + if in == nil { + return nil + } + out := new(BuildRetention) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BuildRun) DeepCopyInto(out *BuildRun) { *out = *in @@ -351,6 +387,11 @@ func (in *BuildSpec) DeepCopyInto(out *BuildSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Retention != nil { + in, out := &in.Retention, &out.Retention + *out = new(BuildRetention) + (*in).DeepCopyInto(*out) + } return } diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index 5901d69472..ea24555ccf 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -16,7 +16,9 @@ import ( "github.com/shipwright-io/build/pkg/config" "github.com/shipwright-io/build/pkg/ctxlog" "github.com/shipwright-io/build/pkg/reconciler/build" + "github.com/shipwright-io/build/pkg/reconciler/build_limit_cleanup" "github.com/shipwright-io/build/pkg/reconciler/buildrun" + "github.com/shipwright-io/build/pkg/reconciler/buildrun_ttl_cleanup" "github.com/shipwright-io/build/pkg/reconciler/buildstrategy" "github.com/shipwright-io/build/pkg/reconciler/clusterbuildstrategy" ) @@ -63,5 +65,13 @@ func NewManager(ctx context.Context, config *config.Config, cfg *rest.Config, op return nil, err } + if err := build_limit_cleanup.Add(ctx, config, mgr); err != nil { + return nil, err + } + + if err := buildrun_ttl_cleanup.Add(ctx, config, mgr); err != nil { + return nil, err + } + return mgr, nil } diff --git a/pkg/reconciler/build_limit_cleanup/build_limit_cleanup.go b/pkg/reconciler/build_limit_cleanup/build_limit_cleanup.go new file mode 100644 index 0000000000..4365d744a8 --- /dev/null +++ b/pkg/reconciler/build_limit_cleanup/build_limit_cleanup.go @@ -0,0 +1,134 @@ +// Copyright The Shipwright Contributors +// +// SPDX-License-Identifier: Apache-2.0 + +package build_limit_cleanup + +import ( + "context" + "sort" + + build "github.com/shipwright-io/build/pkg/apis/build/v1alpha1" + "github.com/shipwright-io/build/pkg/config" + "github.com/shipwright-io/build/pkg/ctxlog" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// ReconcileBuild reconciles a Build object +type ReconcileBuild struct { + /* This client, initialized using mgr.Client() above, is a split client + that reads objects from the cache and writes to the apiserver */ + config *config.Config + client client.Client + scheme *runtime.Scheme + setOwnerReferenceFunc setOwnerReferenceFunc +} + +func NewReconciler(c *config.Config, mgr 
manager.Manager, ownerRef setOwnerReferenceFunc) reconcile.Reconciler {
+	return &ReconcileBuild{
+		config:                c,
+		client:                mgr.GetClient(),
+		scheme:                mgr.GetScheme(),
+		setOwnerReferenceFunc: ownerRef,
+	}
+}
+
+/* Reconcile finds retention fields in builds and makes sure the
+   number of corresponding buildruns adheres to these limits */
+func (r *ReconcileBuild) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
+	// Bound the reconciliation with the configured context timeout.
+	ctx, cancel := context.WithTimeout(ctx, r.config.CtxTimeOut)
+	defer cancel()
+
+	ctxlog.Debug(ctx, "Start reconciling build-limit-cleanup", namespace, request.Namespace, name, request.Name)
+
+	b := &build.Build{}
+	err := r.client.Get(ctx, request.NamespacedName, b)
+
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			ctxlog.Debug(ctx, "Finish reconciling build-limit-cleanup. Build was not found", namespace, request.Namespace, name, request.Name)
+			return reconcile.Result{}, nil
+		}
+		return reconcile.Result{}, err
+	}
+
+	// Retention can be unset if it was removed from the Build after a
+	// related BuildRun event queued this request.
+	if b.Spec.Retention == nil {
+		return reconcile.Result{}, nil
+	}
+
+	lbls := map[string]string{
+		build.LabelBuild: b.Name,
+	}
+	opts := client.ListOptions{
+		Namespace:     b.Namespace,
+		LabelSelector: labels.SelectorFromSet(lbls),
+	}
+	allBuildRuns := &build.BuildRunList{}
+	if err := r.client.List(ctx, allBuildRuns, &opts); err != nil {
+		return reconcile.Result{}, err
+	}
+	if len(allBuildRuns.Items) == 0 {
+		return reconcile.Result{}, nil
+	}
+
+	var buildRunFailed []build.BuildRun
+	var buildRunSucceeded []build.BuildRun
+
+	// Sort buildruns into successful ones and failed ones
+	for _, br := range allBuildRuns.Items {
+		condition := br.Status.GetCondition(build.Succeeded)
+		if condition != nil {
+			if condition.Status == corev1.ConditionFalse {
+				buildRunFailed = append(buildRunFailed, br)
+			} else if condition.Status == corev1.ConditionTrue {
+				buildRunSucceeded = append(buildRunSucceeded, br)
+			}
+		}
+	}
+
+	// Check limits and delete oldest buildruns if limit is reached.
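+	// Both limits use the same approach: sort the completed buildruns by
+	// completion time so that the oldest come first, then delete from the
+	// front of the slice until the retained count is within the limit.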
+	if b.Spec.Retention.SucceededLimit != nil {
+		if len(buildRunSucceeded) > int(*b.Spec.Retention.SucceededLimit) {
+			sort.Slice(buildRunSucceeded, func(i, j int) bool {
+				return buildRunSucceeded[i].Status.CompletionTime.Before(buildRunSucceeded[j].Status.CompletionTime)
+			})
+			lenOfList := len(buildRunSucceeded)
+			for i := 0; lenOfList-i > int(*b.Spec.Retention.SucceededLimit); i++ {
+				ctxlog.Info(ctx, "Deleting succeeded buildrun as cleanup limit has been reached.", namespace, request.Namespace, name, buildRunSucceeded[i].Name)
+				if err := r.client.Delete(ctx, &buildRunSucceeded[i], &client.DeleteOptions{}); err != nil {
+					if apierrors.IsNotFound(err) {
+						// The buildrun is already gone, continue with the next one.
+						continue
+					}
+					ctxlog.Debug(ctx, "Error deleting buildRun.", namespace, request.Namespace, name, buildRunSucceeded[i].Name, deleteError, err)
+					return reconcile.Result{}, err
+				}
+			}
+		}
+	}
+
+	if b.Spec.Retention.FailedLimit != nil {
+		if len(buildRunFailed) > int(*b.Spec.Retention.FailedLimit) {
+			sort.Slice(buildRunFailed, func(i, j int) bool {
+				return buildRunFailed[i].Status.CompletionTime.Before(buildRunFailed[j].Status.CompletionTime)
+			})
+			lenOfList := len(buildRunFailed)
+			for i := 0; lenOfList-i > int(*b.Spec.Retention.FailedLimit); i++ {
+				ctxlog.Info(ctx, "Deleting failed buildrun as cleanup limit has been reached.", namespace, request.Namespace, name, buildRunFailed[i].Name)
+				if err := r.client.Delete(ctx, &buildRunFailed[i], &client.DeleteOptions{}); err != nil {
+					if apierrors.IsNotFound(err) {
+						// The buildrun is already gone, continue with the next one.
+						continue
+					}
+					ctxlog.Debug(ctx, "Error deleting buildRun.", namespace, request.Namespace, name, buildRunFailed[i].Name, deleteError, err)
+					return reconcile.Result{}, err
+				}
+			}
+		}
+	}
+
+	ctxlog.Debug(ctx, "finishing reconciling request from a Build or BuildRun event", namespace, request.Namespace, name, request.Name)
+
+	return reconcile.Result{}, nil
+}
diff --git a/pkg/reconciler/build_limit_cleanup/controller.go b/pkg/reconciler/build_limit_cleanup/controller.go
new file mode 100644
index 0000000000..9e85c70660
--- /dev/null
+++ b/pkg/reconciler/build_limit_cleanup/controller.go
@@ -0,0 +1,125 @@
+// Copyright The Shipwright Contributors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package build_limit_cleanup
+
+import (
+	"context"
+
+	build "github.com/shipwright-io/build/pkg/apis/build/v1alpha1"
+	buildv1alpha1 "github.com/shipwright-io/build/pkg/apis/build/v1alpha1"
+	"github.com/shipwright-io/build/pkg/config"
+	"github.com/shipwright-io/build/pkg/ctxlog"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+	"sigs.k8s.io/controller-runtime/pkg/source"
+)
+
+const (
+	namespace   string = "namespace"
+	name        string = "name"
+	deleteError string = "error"
+)
+
+type setOwnerReferenceFunc func(owner, object metav1.Object, scheme *runtime.Scheme) error
+
+// Add creates a new build-limit-cleanup Controller and adds it to the Manager. The Manager will set fields on the Controller
+// and Start it when the Manager is Started.
+func Add(ctx context.Context, c *config.Config, mgr manager.Manager) error {
+	ctx = ctxlog.NewContext(ctx, "build-limit-cleanup-controller")
+	return add(ctx, mgr, NewReconciler(c, mgr, controllerutil.SetControllerReference), c.Controllers.Build.MaxConcurrentReconciles)
+}
+
+func add(ctx context.Context, mgr manager.Manager, r reconcile.Reconciler, maxConcurrentReconciles int) error {
+	// Create the controller options
+	options := controller.Options{
+		Reconciler: r,
+	}
+
+	if maxConcurrentReconciles > 0 {
+		options.MaxConcurrentReconciles = maxConcurrentReconciles
+	}
+
+	// Create a new controller
+	c, err := controller.New("build-limit-cleanup-controller", mgr, options)
+	if err != nil {
+		return err
+	}
+
+	pred := predicate.Funcs{
+		CreateFunc: func(e event.CreateEvent) bool {
+			o := e.Object.(*buildv1alpha1.Build)
+			return o.Spec.Retention != nil && (o.Spec.Retention.FailedLimit != nil || o.Spec.Retention.SucceededLimit != nil)
+		},
+		UpdateFunc: func(e event.UpdateEvent) bool {
+			n := e.ObjectNew.(*buildv1alpha1.Build)
+			return n.Spec.Retention != nil && (n.Spec.Retention.FailedLimit != nil || n.Spec.Retention.SucceededLimit != nil)
+		},
+		DeleteFunc: func(e event.DeleteEvent) bool {
+			// Never reconcile on deletion, there is nothing we have to do
+			return false
+		},
+	}
+
+	predBuildRun := predicate.Funcs{
+		CreateFunc: func(e event.CreateEvent) bool {
+			// Never reconcile in case of create buildrun event
+			return false
+		},
+		// Reconcile the build if the related buildrun has just completed
+		UpdateFunc: func(e event.UpdateEvent) bool {
+			n := e.ObjectNew.(*buildv1alpha1.BuildRun)
+			o := e.ObjectOld.(*buildv1alpha1.BuildRun)
+			oldCondition := o.Status.GetCondition(buildv1alpha1.Succeeded)
+			newCondition := n.Status.GetCondition(buildv1alpha1.Succeeded)
+			if oldCondition != nil && newCondition != nil {
+				if (oldCondition.Status == corev1.ConditionUnknown) &&
+					(newCondition.Status == corev1.ConditionFalse || newCondition.Status == corev1.ConditionTrue) {
+					if n.Status.BuildSpec != nil && n.Status.BuildSpec.Retention != nil &&
+						(n.Status.BuildSpec.Retention.FailedLimit != nil || n.Status.BuildSpec.Retention.SucceededLimit != nil) {
+						return true
+					}
+				}
+			}
+			return false
+		},
+		DeleteFunc: func(e event.DeleteEvent) bool {
+			// Never reconcile on deletion, there is nothing we have to do
+			return false
+		},
+	}
+
+	// Watch for changes to primary resource Build
+	if err = c.Watch(&source.Kind{Type: &build.Build{}}, &handler.EnqueueRequestForObject{}, pred); err != nil {
+		return err
+	}
+
+	// Watch for changes to resource BuildRun
+	return c.Watch(&source.Kind{Type: &buildv1alpha1.BuildRun{}}, handler.EnqueueRequestsFromMapFunc(func(o client.Object) []reconcile.Request {
+		buildRun := o.(*buildv1alpha1.BuildRun)
+		// check if Buildrun is related to a build
+		if buildRun.Spec.BuildRef.Name == "" {
+			return []reconcile.Request{}
+		}
+
+		return []reconcile.Request{
+			{
+				NamespacedName: types.NamespacedName{
+					Name:      buildRun.Spec.BuildRef.Name,
+					Namespace: buildRun.Namespace,
+				},
+			},
+		}
+	}), predBuildRun)
+}
diff --git a/pkg/reconciler/buildrun_ttl_cleanup/buildrun_ttl_cleanup.go b/pkg/reconciler/buildrun_ttl_cleanup/buildrun_ttl_cleanup.go
new file mode 100644
index 0000000000..cca156dcb6
--- /dev/null
+++ b/pkg/reconciler/buildrun_ttl_cleanup/buildrun_ttl_cleanup.go
@@ -0,0 +1,117 @@
+// Copyright The Shipwright Contributors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package 
buildrun_ttl_cleanup
+
+import (
+	"context"
+	"time"
+
+	buildv1alpha1 "github.com/shipwright-io/build/pkg/apis/build/v1alpha1"
+	"github.com/shipwright-io/build/pkg/config"
+	"github.com/shipwright-io/build/pkg/ctxlog"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+// ReconcileBuildRun reconciles a BuildRun object
+type ReconcileBuildRun struct {
+	/* This client, initialized using mgr.Client() above, is a split client
+	   that reads objects from the cache and writes to the apiserver */
+	config                *config.Config
+	client                client.Client
+	scheme                *runtime.Scheme
+	setOwnerReferenceFunc setOwnerReferenceFunc
+}
+
+func NewReconciler(c *config.Config, mgr manager.Manager, ownerRef setOwnerReferenceFunc) reconcile.Reconciler {
+	return &ReconcileBuildRun{
+		config:                c,
+		client:                mgr.GetClient(),
+		scheme:                mgr.GetScheme(),
+		setOwnerReferenceFunc: ownerRef,
+	}
+}
+
+// GetBuildRunObject retrieves an existing BuildRun based on a name and namespace
+func (r *ReconcileBuildRun) GetBuildRunObject(ctx context.Context, objectName string, objectNS string, buildRun *buildv1alpha1.BuildRun) error {
+	if err := r.client.Get(ctx, types.NamespacedName{Name: objectName, Namespace: objectNS}, buildRun); err != nil {
+		return err
+	}
+	return nil
+}
+
+/* Reconcile makes sure the buildrun adheres to its ttl retention field and deletes it
+   once the ttl limit is hit */
+func (r *ReconcileBuildRun) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
+	// Bound the reconciliation with the configured context timeout.
+	ctx, cancel := context.WithTimeout(ctx, r.config.CtxTimeOut)
+	defer cancel()
+
+	ctxlog.Debug(ctx, "Start reconciling Buildrun-ttl", namespace, request.Namespace, name, request.Name)
+
+	br := &buildv1alpha1.BuildRun{}
+	err := r.GetBuildRunObject(ctx, request.Name, request.Namespace, br)
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			ctxlog.Debug(ctx, "Finish reconciling buildrun-ttl. Buildrun was not found", namespace, request.Namespace, name, request.Name)
+			return reconcile.Result{}, nil
+		}
+		return reconcile.Result{}, err
+	}
+
+	condition := br.Status.GetCondition(buildv1alpha1.Succeeded)
+	if condition == nil {
+		return reconcile.Result{}, nil
+	}
+
+	// Guard against buildruns whose recorded build spec carries no retention settings.
+	if br.Status.BuildSpec == nil || br.Status.BuildSpec.Retention == nil {
+		return reconcile.Result{}, nil
+	}
+
+	/* In case ttl has been reached, delete the buildrun, if not,
+	   calculate the remaining time and requeue the buildrun */
+	switch condition.Status {
+
+	case corev1.ConditionTrue:
+		if br.Status.BuildSpec.Retention.TtlAfterSucceeded != nil {
+			if br.Status.CompletionTime.Add(br.Status.BuildSpec.Retention.TtlAfterSucceeded.Duration).Before(time.Now()) {
+				ctxlog.Info(ctx, "Deleting successful buildrun as ttl has been reached.", namespace, request.Namespace, name, request.Name)
+				deleteBuildRunErr := r.client.Delete(ctx, br, &client.DeleteOptions{})
+				if deleteBuildRunErr != nil {
+					if apierrors.IsNotFound(deleteBuildRunErr) {
+						return reconcile.Result{}, nil
+					}
+					ctxlog.Debug(ctx, "Error deleting buildRun.", namespace, request.Namespace, name, br.Name, deleteError, deleteBuildRunErr)
+					return reconcile.Result{}, deleteBuildRunErr
+				}
+			} else {
+				timeLeft := time.Until(br.Status.CompletionTime.Add(br.Status.BuildSpec.Retention.TtlAfterSucceeded.Duration))
+				return reconcile.Result{Requeue: true, RequeueAfter: timeLeft}, nil
+			}
+		}
+
+	case corev1.ConditionFalse:
+		if br.Status.BuildSpec.Retention.TtlAfterFailed != nil {
+			if br.Status.CompletionTime.Add(br.Status.BuildSpec.Retention.TtlAfterFailed.Duration).Before(time.Now()) {
+				ctxlog.Info(ctx, "Deleting failed buildrun as ttl has been reached.", namespace, request.Namespace, name, request.Name)
+				deleteBuildRunErr := r.client.Delete(ctx, br, &client.DeleteOptions{})
+				if deleteBuildRunErr != nil {
+					if apierrors.IsNotFound(deleteBuildRunErr) {
+						return reconcile.Result{}, nil
+					}
+					ctxlog.Debug(ctx, "Error deleting buildRun.", namespace, request.Namespace, name, br.Name, deleteError, deleteBuildRunErr)
+					return reconcile.Result{}, deleteBuildRunErr
+				}
+			} else {
+				timeLeft := time.Until(br.Status.CompletionTime.Add(br.Status.BuildSpec.Retention.TtlAfterFailed.Duration))
+				return reconcile.Result{Requeue: true, RequeueAfter: timeLeft}, nil
+			}
+		}
+	}
+	ctxlog.Debug(ctx, "Finishing reconciling request from a BuildRun event", namespace, request.Namespace, name, request.Name)
+	return reconcile.Result{}, nil
+}
diff --git a/pkg/reconciler/buildrun_ttl_cleanup/controller.go b/pkg/reconciler/buildrun_ttl_cleanup/controller.go
new file mode 100644
index 0000000000..f36e0b8013
--- /dev/null
+++ b/pkg/reconciler/buildrun_ttl_cleanup/controller.go
@@ -0,0 +1,86 @@
+// Copyright The Shipwright Contributors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package buildrun_ttl_cleanup
+
+import (
+	"context"
+
+	buildv1alpha1 "github.com/shipwright-io/build/pkg/apis/build/v1alpha1"
+	"github.com/shipwright-io/build/pkg/config"
+	"github.com/shipwright-io/build/pkg/ctxlog"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+	"sigs.k8s.io/controller-runtime/pkg/source"
+)
+
+const (
+	namespace   string = "namespace"
+	name        string = "name"
+	deleteError string = "error"
+)
+
+type 
setOwnerReferenceFunc func(owner, object metav1.Object, scheme *runtime.Scheme) error
+
+// Add creates a new buildrun-ttl-cleanup Controller and adds it to the Manager. The Manager will set fields on the Controller
+// and Start it when the Manager is Started.
+func Add(ctx context.Context, c *config.Config, mgr manager.Manager) error {
+	ctx = ctxlog.NewContext(ctx, "buildrun-ttl-cleanup-controller")
+	return add(ctx, mgr, NewReconciler(c, mgr, controllerutil.SetControllerReference), c.Controllers.BuildRun.MaxConcurrentReconciles)
+}
+
+func add(ctx context.Context, mgr manager.Manager, r reconcile.Reconciler, maxConcurrentReconciles int) error {
+	// Create the controller options
+	options := controller.Options{
+		Reconciler: r,
+	}
+
+	if maxConcurrentReconciles > 0 {
+		options.MaxConcurrentReconciles = maxConcurrentReconciles
+	}
+
+	c, err := controller.New("buildrun-ttl-cleanup-controller", mgr, options)
+	if err != nil {
+		return err
+	}
+
+	predBuildRun := predicate.Funcs{
+		CreateFunc: func(e event.CreateEvent) bool {
+			o := e.Object.(*buildv1alpha1.BuildRun)
+			return o.Status.BuildSpec != nil && o.Status.BuildSpec.Retention != nil &&
+				(o.Status.BuildSpec.Retention.TtlAfterFailed != nil || o.Status.BuildSpec.Retention.TtlAfterSucceeded != nil)
+		},
+		UpdateFunc: func(e event.UpdateEvent) bool {
+			o := e.ObjectNew.(*buildv1alpha1.BuildRun)
+			return o.Status.BuildSpec != nil && o.Status.BuildSpec.Retention != nil &&
+				(o.Status.BuildSpec.Retention.TtlAfterFailed != nil || o.Status.BuildSpec.Retention.TtlAfterSucceeded != nil)
+		},
+		DeleteFunc: func(e event.DeleteEvent) bool {
+			// Never reconcile on deletion, there is nothing we have to do
+			return false
+		},
+	}
+	// Watch for changes to primary resource BuildRun
+	if err = c.Watch(&source.Kind{Type: &buildv1alpha1.BuildRun{}}, &handler.EnqueueRequestForObject{}, predBuildRun); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/test/build_samples.go b/test/build_samples.go
index cce41843dd..03d357de99 100644
--- a/test/build_samples.go
+++ b/test/build_samples.go
@@ -506,3 +506,63 @@ spec:
   output:
     image: image-registry.openshift-image-registry.svc:5000/example/buildpacks-app
 `
+
+// MinimalBuildWithRetentionTTLFive defines a simple
+// Build with a source, a strategy and ttl
+const MinimalBuildWithRetentionTTLFive = `
+apiVersion: shipwright.io/v1alpha1
+kind: Build
+metadata:
+  name: build-retention-ttl
+spec:
+  source:
+    url: "https://github.com/shipwright-io/sample-go"
+    contextDir: docker-build
+  strategy:
+    kind: ClusterBuildStrategy
+  output:
+    image: image-registry.openshift-image-registry.svc:5000/example/buildpacks-app
+  retention:
+    ttlAfterFailed: 5s
+    ttlAfterSucceeded: 5s
+`
+
+// MinimalBuildWithRetentionLimitOne defines a simple
+// Build with a source, a strategy and limits set as 1
+const MinimalBuildWithRetentionLimitOne = `
+apiVersion: shipwright.io/v1alpha1
+kind: Build
+metadata:
+  name: build-retention-limit
+spec:
+  source:
+    url: "https://github.com/shipwright-io/sample-go"
+    contextDir: docker-build
+  strategy:
+    kind: ClusterBuildStrategy
+  output:
+    image: image-registry.openshift-image-registry.svc:5000/example/buildpacks-app
+  retention:
+    failedLimit: 1
+    succeededLimit: 1
+`
+
+// MinimalBuildWithRetentionLimitDiff defines a simple Build with a source,
+// a strategy and different failed and succeeded limits
+const MinimalBuildWithRetentionLimitDiff = `
+apiVersion: shipwright.io/v1alpha1
+kind: Build
+metadata:
+  name: build-retention-limit
+spec:
+  source:
+    url: "https://github.com/shipwright-io/sample-go"
+    contextDir: docker-build
+  strategy:
+    kind: ClusterBuildStrategy
+  output:
+    image: image-registry.openshift-image-registry.svc:5000/example/buildpacks-app
+  retention:
+    failedLimit: 1
+    succeededLimit: 2
+`
diff --git a/test/buildrun_samples.go b/test/buildrun_samples.go
index 8b9945527a..7660f61de7 100644
--- a/test/buildrun_samples.go
+++ b/test/buildrun_samples.go
@@ -200,3 +200,15 @@ spec:
   buildRef:
     name: foobar
 `
+
+// MinimalBuildRunRetention defines a minimal BuildRun
+// with a reference used to test retention fields
+const MinimalBuildRunRetention = `
+apiVersion: shipwright.io/v1alpha1
+kind: BuildRun
+metadata:
+  name: buildrun-retention-ttl
+spec:
+  buildRef:
+    name: build-retention-ttl
+`
diff --git a/test/clusterbuildstrategy_samples.go b/test/clusterbuildstrategy_samples.go
index 16e0837e7c..a9593b0a0c 100644
--- a/test/clusterbuildstrategy_samples.go
+++ b/test/clusterbuildstrategy_samples.go
@@ -344,3 +344,35 @@ spec:
       args:
         - $(params.sleep-time)
 `
+
+// ClusterBuildStrategySingleStepNoPush defines a simple strategy that does not push the image
+const ClusterBuildStrategySingleStepNoPush = `
+apiVersion: shipwright.io/v1alpha1
+kind: ClusterBuildStrategy
+metadata:
+  name: buildah
+spec:
+  buildSteps:
+    - name: buildah-bud
+      image: quay.io/containers/buildah:v1.20.1
+      workingDir: $(params.shp-source-root)
+      securityContext:
+        privileged: true
+      command:
+        - /usr/bin/buildah
+      args:
+        - bud
+        - --tag=$(params.shp-output-image)
+        - --file=$(build.dockerfile)
+        - $(params.shp-source-context)
+      resources:
+        limits:
+          cpu: 500m
+          memory: 1Gi
+        requests:
+          cpu: 250m
+          memory: 65Mi
+      volumeMounts:
+        - name: buildah-images
+          mountPath: /var/lib/containers/storage
+`
diff --git a/test/integration/buildrun_cleanup_test.go b/test/integration/buildrun_cleanup_test.go
new file mode 100644
index 0000000000..7cede008f3
--- /dev/null
+++ b/test/integration/buildrun_cleanup_test.go
@@ -0,0 +1,292 @@
+// Copyright The Shipwright Contributors
+//
+// SPDX-License-Identifier: Apache-2.0
+package integration_test
+
+import (
+	"time"
+
+	. "github.com/onsi/ginkgo/v2"
+	. 
"github.com/onsi/gomega" + "github.com/shipwright-io/build/pkg/apis/build/v1alpha1" + "github.com/shipwright-io/build/test" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" +) + +var _ = Describe("Integration tests for retention limits and ttls for succeeded buildruns.", func() { + var ( + cbsObject *v1alpha1.ClusterBuildStrategy + cbsObjectFail *v1alpha1.ClusterBuildStrategy + buildObject *v1alpha1.Build + buildRunObject *v1alpha1.BuildRun + buildSample []byte + buildRunSample []byte + ) + + // Load the ClusterBuildStrategies before each test case + BeforeEach(func() { + cbsObject, err = tb.Catalog.LoadCBSWithName(STRATEGY+tb.Namespace, []byte(test.ClusterBuildStrategyNoOp)) + Expect(err).To(BeNil()) + + err = tb.CreateClusterBuildStrategy(cbsObject) + Expect(err).To(BeNil()) + }) + + AfterEach(func() { + _, err = tb.GetBuild(buildObject.Name) + if err == nil { + Expect(tb.DeleteBuild(buildObject.Name)).To(BeNil()) + } + + err := tb.DeleteClusterBuildStrategy(cbsObject.Name) + Expect(err).To(BeNil()) + }) + + JustBeforeEach(func() { + if buildSample != nil { + buildObject, err = tb.Catalog.LoadBuildWithNameAndStrategy(BUILD+tb.Namespace, STRATEGY+tb.Namespace, buildSample) + Expect(err).To(BeNil()) + } + + if buildRunSample != nil { + buildRunObject, err = tb.Catalog.LoadBRWithNameAndRef(BUILDRUN+tb.Namespace, BUILD+tb.Namespace, buildRunSample) + Expect(err).To(BeNil()) + } + }) + + Context("When a buildrun related to a build with short ttl set succeeds", func() { + + BeforeEach(func() { + buildSample = []byte(test.MinimalBuildWithRetentionTTLFive) + buildRunSample = []byte(test.MinimalBuildRunRetention) + }) + + It("Should not find the buildrun after few seconds after it succeeds", func() { + + Expect(tb.CreateBuild(buildObject)).To(BeNil()) + + buildObject, err = tb.GetBuildTillValidation(buildObject.Name) + Expect(err).To(BeNil()) + + Expect(tb.CreateBR(buildRunObject)).To(BeNil()) + + br, err := tb.GetBRTillCompletion(buildRunObject.Name) + Expect(err).To(BeNil()) + Expect(br.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionTrue)) + br, err = tb.GetBRTillNotFound(buildRunObject.Name, time.Second*1, time.Minute) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + }) + + Context("Multiple successful buildruns related to build with limit 1", func() { + + BeforeEach(func() { + buildSample = []byte(test.MinimalBuildWithRetentionLimitOne) + buildRunSample = []byte(test.MinimalBuildRunRetention) + }) + + It("The older successful buildrun should not exist", func() { + + Expect(tb.CreateBuild(buildObject)).To(BeNil()) + + buildObject, err = tb.GetBuildTillValidation(buildObject.Name) + Expect(err).To(BeNil()) + + // Create first buildrun + buildRunObject.Name = BUILDRUN + tb.Namespace + "-1" + Expect(tb.CreateBR(buildRunObject)).To(BeNil()) + br1, err := tb.GetBRTillCompletion(buildRunObject.Name) + Expect(err).To(BeNil()) + Expect(br1.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionTrue)) + // Create second buildrun + buildRunObject.Name = BUILDRUN + tb.Namespace + "-2" + Expect(tb.CreateBR(buildRunObject)).To(BeNil()) + br2, err := tb.GetBRTillCompletion(buildRunObject.Name) + Expect(err).To(BeNil()) + Expect(br2.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionTrue)) + + _, err = tb.GetBR(BUILDRUN + tb.Namespace + "-1") + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + + }) + + Context("Multiple buildruns with different build limits for failure and success", func() { + + 
+		BeforeEach(func() {
+			// Load and create the build strategy that is expected to fail
+			cbsObjectFail, err = tb.Catalog.LoadCBSWithName(STRATEGY+tb.Namespace+"-fail", []byte(test.ClusterBuildStrategySingleStepNoPush))
+			Expect(err).To(BeNil())
+
+			err = tb.CreateClusterBuildStrategy(cbsObjectFail)
+			Expect(err).To(BeNil())
+			buildSample = []byte(test.MinimalBuildWithRetentionLimitDiff)
+			buildRunSample = []byte(test.MinimalBuildRunRetention)
+		})
+
+		AfterEach(func() {
+			err := tb.DeleteClusterBuildStrategy(cbsObjectFail.Name)
+			Expect(err).To(BeNil())
+		})
+		// Case with failedLimit 1 and succeededLimit 2.
+		// It ensures that only relevant buildruns are affected by retention parameters
+		It("Should not find the old failed buildrun if the limit has been triggered", func() {
+
+			// Create build
+			Expect(tb.CreateBuild(buildObject)).To(BeNil())
+			buildObject, err = tb.GetBuildTillValidation(buildObject.Name)
+			Expect(err).To(BeNil())
+
+			// Create 2 successful buildruns
+			buildRunObject.Name = BUILDRUN + tb.Namespace + "-success-1"
+			Expect(tb.CreateBR(buildRunObject)).To(BeNil())
+
+			buildRunObject.Name = BUILDRUN + tb.Namespace + "-success-2"
+			Expect(tb.CreateBR(buildRunObject)).To(BeNil())
+
+			// Load and create the failing buildobject with the relevant cbs
+			buildObject, err = tb.Catalog.LoadBuildWithNameAndStrategy(BUILD+tb.Namespace+"-fail", STRATEGY+tb.Namespace+"-fail", buildSample)
+			Expect(err).To(BeNil())
+			buildObject.Spec.Source.ContextDir = nil
+			Expect(tb.CreateBuild(buildObject)).To(BeNil())
+			buildObject, err = tb.GetBuildTillValidation(buildObject.Name)
+			Expect(err).To(BeNil())
+
+			// Create 1 failed buildrun
+			buildRunObject.Name = BUILDRUN + tb.Namespace + "-fail-1"
+			buildRunObject.Spec.BuildRef.Name = BUILD + tb.Namespace + "-fail"
+			Expect(tb.CreateBR(buildRunObject)).To(BeNil())
+
+			// Wait for buildrun completion
+			br1, err := tb.GetBRTillCompletion(BUILDRUN + tb.Namespace + "-success-1")
+			Expect(err).To(BeNil())
+			Expect(br1.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionTrue))
+
+			br2, err := tb.GetBRTillCompletion(BUILDRUN + tb.Namespace + "-success-2")
+			Expect(err).To(BeNil())
+			Expect(br2.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionTrue))
+
+			br3, err := tb.GetBRTillCompletion(BUILDRUN + tb.Namespace + "-fail-1")
+			Expect(err).To(BeNil())
+			Expect(br3.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionFalse))
+
+			// Create 1 failed buildrun.
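+			// This second failure exceeds failedLimit: 1, so the oldest failed
+			// buildrun is expected to be deleted while both succeeded buildruns
+			// remain within succeededLimit: 2.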
+			buildRunObject.Name = BUILDRUN + tb.Namespace + "-fail-2"
+			buildRunObject.Spec.BuildRef.Name = BUILD + tb.Namespace + "-fail"
+			Expect(tb.CreateBR(buildRunObject)).To(BeNil())
+			br4, err := tb.GetBRTillCompletion(buildRunObject.Name)
+			Expect(err).To(BeNil())
+			Expect(br4.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionFalse))
+
+			// Check that the older failed buildrun has been deleted while the successful buildruns exist
+			_, err = tb.GetBR(BUILDRUN + tb.Namespace + "-fail-1")
+			Expect(apierrors.IsNotFound(err)).To(BeTrue())
+			_, err = tb.GetBR(BUILDRUN + tb.Namespace + "-success-1")
+			Expect(err).To(BeNil())
+			_, err = tb.GetBR(BUILDRUN + tb.Namespace + "-success-2")
+			Expect(err).To(BeNil())
+		})
+	})
+
+})
+
+var _ = Describe("Integration tests for retention limits and TTLs of buildruns that fail", func() {
+	var (
+		cbsObject      *v1alpha1.ClusterBuildStrategy
+		buildObject    *v1alpha1.Build
+		buildRunObject *v1alpha1.BuildRun
+		buildSample    []byte
+		buildRunSample []byte
+	)
+	// Load the ClusterBuildStrategies before each test case
+	BeforeEach(func() {
+		cbsObject, err = tb.Catalog.LoadCBSWithName(STRATEGY+tb.Namespace, []byte(test.ClusterBuildStrategySingleStepNoPush))
+		Expect(err).To(BeNil())
+
+		err = tb.CreateClusterBuildStrategy(cbsObject)
+		Expect(err).To(BeNil())
+	})
+	AfterEach(func() {
+		_, err = tb.GetBuild(buildObject.Name)
+		if err == nil {
+			Expect(tb.DeleteBuild(buildObject.Name)).To(BeNil())
+		}
+
+		err := tb.DeleteClusterBuildStrategy(cbsObject.Name)
+		Expect(err).To(BeNil())
+	})
+
+	JustBeforeEach(func() {
+		if buildSample != nil {
+			buildObject, err = tb.Catalog.LoadBuildWithNameAndStrategy(BUILD+tb.Namespace, STRATEGY+tb.Namespace, buildSample)
+			Expect(err).To(BeNil())
+		}
+
+		if buildRunSample != nil {
+			buildRunObject, err = tb.Catalog.LoadBRWithNameAndRef(BUILDRUN+tb.Namespace, BUILD+tb.Namespace, buildRunSample)
+			Expect(err).To(BeNil())
+		}
+	})
+
+	Context("When a buildrun related to a build with short ttl set fails", func() {
+
+		BeforeEach(func() {
+			buildSample = []byte(test.MinimalBuildWithRetentionTTLFive)
+			buildRunSample = []byte(test.MinimalBuildRunRetention)
+		})
+
+		It("Should not find the buildrun a few seconds after it fails", func() {
+
+			buildObject.Spec.Source.ContextDir = nil
+			Expect(tb.CreateBuild(buildObject)).To(BeNil())
+
+			buildObject, err = tb.GetBuildTillValidation(buildObject.Name)
+			Expect(err).To(BeNil())
+
+			Expect(tb.CreateBR(buildRunObject)).To(BeNil())
+
+			br, err := tb.GetBRTillCompletion(buildRunObject.Name)
+			Expect(err).To(BeNil())
+			Expect(br.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionFalse))
+			_, err = tb.GetBRTillNotFound(buildRunObject.Name, time.Second*1, time.Minute)
+			Expect(apierrors.IsNotFound(err)).To(BeTrue())
+		})
+	})
+
+	Context("Multiple failed buildruns related to build with limit 1", func() {
+
+		BeforeEach(func() {
+			buildSample = []byte(test.MinimalBuildWithRetentionLimitOne)
+			buildRunSample = []byte(test.MinimalBuildRunRetention)
+		})
+
+		It("The older failed buildrun should not exist", func() {
+			buildObject.Spec.Source.ContextDir = nil
+			Expect(tb.CreateBuild(buildObject)).To(BeNil())
+
+			buildObject, err = tb.GetBuildTillValidation(buildObject.Name)
+			Expect(err).To(BeNil())
+
+			// Create first buildrun
+			buildRunObject.Name = BUILDRUN + tb.Namespace + "-1"
+			Expect(tb.CreateBR(buildRunObject)).To(BeNil())
+			br1, err := tb.GetBRTillCompletion(buildRunObject.Name)
+			Expect(err).To(BeNil())
+			Expect(br1.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionFalse))
+			// Create second buildrun
+			buildRunObject.Name = BUILDRUN + tb.Namespace + "-2"
+			Expect(tb.CreateBR(buildRunObject)).To(BeNil())
+			br2, err := tb.GetBRTillCompletion(buildRunObject.Name)
+			Expect(err).To(BeNil())
+			Expect(br2.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionFalse))
+
+			_, err = tb.GetBR(BUILDRUN + tb.Namespace + "-1")
+			Expect(apierrors.IsNotFound(err)).To(BeTrue())
+		})
+
+	})
+
+})
diff --git a/test/utils/buildruns.go b/test/utils/buildruns.go
index c25a12b4c0..afa3395c2e 100644
--- a/test/utils/buildruns.go
+++ b/test/utils/buildruns.go
@@ -9,6 +9,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"time"
 
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -108,6 +109,31 @@ func (t *TestBuild) GetBRTillCompletion(name string) (*v1alpha1.BuildRun, error)
 	return brInterface.Get(context.TODO(), name, metav1.GetOptions{})
 }
 
+// GetBRTillNotFound polls until the BuildRun is no longer found. It returns the
+// NotFound error once the BuildRun is gone, or a timeout error if the BuildRun
+// still exists when the timeout is reached.
+func (t *TestBuild) GetBRTillNotFound(name string, interval time.Duration, timeout time.Duration) (*v1alpha1.BuildRun, error) {
+
+	var (
+		GetBRTillNotFound = func() (bool, error) {
+
+			bInterface := t.BuildClientSet.ShipwrightV1alpha1().BuildRuns(t.Namespace)
+			_, err := bInterface.Get(context.TODO(), name, metav1.GetOptions{})
+			if err != nil && apierrors.IsNotFound(err) {
+				return true, err
+			}
+			return false, nil
+		}
+	)
+
+	brInterface := t.BuildClientSet.ShipwrightV1alpha1().BuildRuns(t.Namespace)
+
+	err := wait.PollImmediate(interval, timeout, GetBRTillNotFound)
+	if err != nil {
+		return nil, err
+	}
+
+	return brInterface.Get(context.TODO(), name, metav1.GetOptions{})
+}
+
 // GetBRTillNotOwner returns a BuildRun that has not an owner.
 // If the timeout is reached or it fails when retrieving the BuildRun it will
 // stop polling and return

From 9dd60e36d729c4d4bee30d075abcf228539454a5 Mon Sep 17 00:00:00 2001
From: Pravallika Dindukurthi
Date: Tue, 29 Mar 2022 17:38:54 +0530
Subject: [PATCH 2/2] Resolved verification issues and review comments.
Signed-off-by: Raghav Bhatnagar Signed-off-by: Pravallika Dindukurthi --- deploy/crds/shipwright.io_buildruns.yaml | 44 +++- deploy/crds/shipwright.io_builds.yaml | 6 +- docs/build.md | 15 +- docs/buildrun.md | 49 ++-- pkg/apis/build/v1alpha1/build_types.go | 10 +- pkg/apis/build/v1alpha1/buildrun_types.go | 18 ++ .../build/v1alpha1/zz_generated.deepcopy.go | 39 ++- pkg/controller/controller.go | 8 +- .../build_limit_cleanup.go | 76 +++--- .../controller.go | 62 +++-- .../buildrun_ttl_cleanup.go | 117 --------- .../buildrun_ttl_cleanup/controller.go | 86 ------- .../buildrun_ttl_cleanup.go | 99 ++++++++ .../buildrunttlcleanup/controller.go | 146 ++++++++++++ test/build_samples.go | 20 ++ test/buildrun_samples.go | 28 +++ test/clusterbuildstrategy_samples.go | 38 +-- test/integration/buildrun_cleanup_test.go | 224 +++++++++++++++--- 18 files changed, 710 insertions(+), 375 deletions(-) rename pkg/reconciler/{build_limit_cleanup => buildlimitcleanup}/build_limit_cleanup.go (58%) rename pkg/reconciler/{build_limit_cleanup => buildlimitcleanup}/controller.go (64%) delete mode 100644 pkg/reconciler/buildrun_ttl_cleanup/buildrun_ttl_cleanup.go delete mode 100644 pkg/reconciler/buildrun_ttl_cleanup/controller.go create mode 100644 pkg/reconciler/buildrunttlcleanup/buildrun_ttl_cleanup.go create mode 100644 pkg/reconciler/buildrunttlcleanup/controller.go diff --git a/deploy/crds/shipwright.io_buildruns.yaml b/deploy/crds/shipwright.io_buildruns.yaml index d7ac557282..6542d7c2a9 100644 --- a/deploy/crds/shipwright.io_buildruns.yaml +++ b/deploy/crds/shipwright.io_buildruns.yaml @@ -366,13 +366,13 @@ spec: minimum: 1 type: integer ttlAfterFailed: - description: TtlAfterFailed defines the maximum duration of + description: TTLAfterFailed defines the maximum duration of time the failed buildrun should exist. format: duration type: string ttlAfterSucceeded: - description: TtlAfterFailed defines the maximum duration of - time the succeeded buildrun should exist. + description: TTLAfterSucceeded defines the maximum duration + of time the succeeded buildrun should exist. format: duration type: string type: object @@ -710,6 +710,20 @@ spec: - name type: object type: array + retention: + description: Contains information about retention params + properties: + ttlAfterFailed: + description: TTLAfterFailed defines the maximum duration of time + the failed buildrun should exist. + format: duration + type: string + ttlAfterSucceeded: + description: TTLAfterSucceeded defines the maximum duration of + time the succeeded buildrun should exist. + format: duration + type: string + type: object serviceAccount: description: ServiceAccount refers to the kubernetes serviceaccount which is used for resource control. Default serviceaccount will @@ -1044,6 +1058,30 @@ spec: - name type: object type: array + retention: + description: Contains information about retention params + properties: + failedLimit: + description: FailedLimit defines the maximum number of failed + buildruns that should exist. + minimum: 1 + type: integer + succeededLimit: + description: SucceededLimit defines the maximum number of + succeeded buildruns that should exist. + minimum: 1 + type: integer + ttlAfterFailed: + description: TTLAfterFailed defines the maximum duration of + time the failed buildrun should exist. + format: duration + type: string + ttlAfterSucceeded: + description: TTLAfterSucceeded defines the maximum duration + of time the succeeded buildrun should exist. 
+ format: duration + type: string + type: object source: description: Source refers to the Git repository containing the source code to be built. diff --git a/deploy/crds/shipwright.io_builds.yaml b/deploy/crds/shipwright.io_builds.yaml index f28ac4ac02..77eb0df1fd 100644 --- a/deploy/crds/shipwright.io_builds.yaml +++ b/deploy/crds/shipwright.io_builds.yaml @@ -345,13 +345,13 @@ spec: minimum: 1 type: integer ttlAfterFailed: - description: TtlAfterFailed defines the maximum duration of time + description: TTLAfterFailed defines the maximum duration of time the failed buildrun should exist. format: duration type: string ttlAfterSucceeded: - description: TtlAfterFailed defines the maximum duration of time - the succeeded buildrun should exist. + description: TTLAfterSucceeded defines the maximum duration of + time the succeeded buildrun should exist. format: duration type: string type: object diff --git a/docs/build.md b/docs/build.md index 2e6706b5c0..0345023181 100644 --- a/docs/build.md +++ b/docs/build.md @@ -569,20 +569,7 @@ An example of a user using both TTL and Limit retention fields. In case of such succeededLimit: 20 ``` -**NOTE**: When changes are made to `retention.failedLimit` and `retention.succeededLimit` values, they come into effect as soon as the build is applied, thereby enforcing the new limits. On the other hand, changing the `retention.ttlAfterFailed` and `retention.ttlAfterSucceeded` values will only affect new buildruns. Old buildruns will adhere to the old TTL retention values. - -#### Build_limit_cleanup controller - -Builds are watched by the build_limit_cleanup controller for the following preconditions: - -- Update on `Build` resource if either `retention.failedLimit` or `retention.succeeded` are set -- Create on `Build` resource if either `retention.failedLimit` or `retention.succeeded` are set - -build_limit_cleanup also watches buildruns for the following precondition: - -- Update on `BuildRun` resource if either `retention.failedLimit` or `retention.succeeded` are set and the Buildrun has just completed execution. - -If these conditions are met, the reconciler calculates the number of buildruns that exists and deletes the oldest ones till the limit is satisfied. +**NOTE**: When changes are made to `retention.failedLimit` and `retention.succeededLimit` values, they come into effect as soon as the build is applied, thereby enforcing the new limits. On the other hand, changing the `retention.ttlAfterFailed` and `retention.ttlAfterSucceeded` values will only affect new buildruns. Old buildruns will adhere to the old TTL retention values. In case TTL values are defined in buildrun specifications as well as build specifications, priority will be given to the values defined in the buildrun specifications. ### Sources diff --git a/docs/buildrun.md b/docs/buildrun.md index 906300e083..4505e3f5be 100644 --- a/docs/buildrun.md +++ b/docs/buildrun.md @@ -13,6 +13,7 @@ SPDX-License-Identifier: Apache-2.0 - [Defining the BuildSpec](#defining-the-buildspec) - [Defining ParamValues](#defining-paramvalues) - [Defining the ServiceAccount](#defining-the-serviceaccount) + - [Defining Retention Parameters](#defining-retention-parameters) - [Canceling a `BuildRun`](#canceling-a-buildrun) - [Automatic `BuildRun` deletion](#automatic-buildrun-deletion) - [Specifying Environment Variables](#specifying-environment-variables) @@ -167,6 +168,32 @@ You can also use set the `spec.serviceAccount.generate` path to `true`. 
This wil
 _**Note**_: When the service account is not defined, the `BuildRun` uses the `pipeline` service account if it exists in the namespace, and falls back to the `default` service account.
 
+### Defining Retention Parameters
+
+A `BuildRun` resource can specify how long a completed BuildRun can exist. Instead of manually cleaning up old BuildRuns, retention parameters provide an alternate method for cleaning up BuildRuns automatically.
+
+As part of the buildrun retention parameters, we have the following fields:
+
+- `retention.ttlAfterFailed` - Specifies the duration for which a failed buildrun can exist.
+- `retention.ttlAfterSucceeded` - Specifies the duration for which a successful buildrun can exist.
+
+An example of a `BuildRun` using TTL retention parameters:
+
+```yaml
+apiVersion: shipwright.io/v1alpha1
+kind: BuildRun
+metadata:
+  name: buildrun-retention-ttl
+spec:
+  buildRef:
+    name: build-retention-ttl
+  retention:
+    ttlAfterFailed: 10m
+    ttlAfterSucceeded: 10m
+```
+
+**NOTE**: In case TTL values are defined in buildrun specifications as well as build specifications, priority will be given to the values defined in the buildrun specifications.
+
 ## Canceling a `BuildRun`
 
 To cancel a `BuildRun` that's currently executing, update its status to mark it as canceled.
@@ -187,23 +214,17 @@ spec:
   state: "BuildRunCanceled"
 ```
 
 ## Automatic `BuildRun` deletion
 
-We have two controllers that ensure that buildruns can be deleted automatically if required. This is ensured by adding `retention` parameters in build specifications.
+We have two controllers that ensure that buildruns can be deleted automatically if required. This is done by adding `retention` parameters in either the build specifications or the buildrun specifications.
 
-- TTL parameters: These are used by the buildrun_ttl_cleanup controller.
+- Buildrun TTL parameters: These make sure that a buildrun only exists for a fixed duration of time after completion.
+  - `buildrun.spec.retention.ttlAfterFailed`: The buildrun is deleted if the mentioned duration of time has passed and the buildrun has failed.
+  - `buildrun.spec.retention.ttlAfterSucceeded`: The buildrun is deleted if the mentioned duration of time has passed and the buildrun has succeeded.
+- Build TTL parameters: These make sure that related buildruns only exist for a fixed duration of time after completion.
   - `build.spec.retention.ttlAfterFailed`: The buildrun is deleted if the mentioned duration of time has passed and the buildrun has failed.
   - `build.spec.retention.ttlAfterSucceeded`: The buildrun is deleted if the mentioned duration of time has passed and the buildrun has succeeded.
-- Limit parameters: These are used by the build_limit_cleanup controller.
-  - `build.spec.retention.failedLimit`: This parameter ensures that the number of failed buildruns do not exceed this limit. If the limit is exceeded, the oldest failed buildruns are deleted till the limit is satisfied.
-  - `build.spec.retention.succeededLimit`: This parameter ensures that the number of successful buildruns do not exceed this limit. If the limit is exceeded, the oldest successful buildruns are deleted till the limit is satisfied.
-
-### Buildrun_ttl_cleanup controller
-
-Buildruns are watched by the buildrun_ttl_cleanup controller for the following preconditions:
-
-- Update on `Buildrun` resource if either `ttlAfterSucceeded` or `ttlAfterFailed` are set.
-- Create on `Buildrun` resource if either `ttlAfterSucceeded` or `ttlAfterFailed` are set.
- -If these conditions are met, the reconciler checks if the ttl retention field is satisfied or not. If it is not, the buildrun is requeued after calculating the remaining time the buildrun is allowed to exist. +- Build Limit parameters: These are used to limit the number of failed or succeeded buildruns that can exist for a build. + - `build.spec.retention.succeededLimit` - Defines the number of succeeded BuildRuns for a Build that can exist. + - `build.spec.retention.failedLimit` - Defines the number of failed BuildRuns for a Build that can exist. ## Specifying Environment Variables diff --git a/pkg/apis/build/v1alpha1/build_types.go b/pkg/apis/build/v1alpha1/build_types.go index ba76653898..adbac319cd 100644 --- a/pkg/apis/build/v1alpha1/build_types.go +++ b/pkg/apis/build/v1alpha1/build_types.go @@ -220,7 +220,7 @@ type BuildList struct { Items []Build `json:"items"` } -// Retention struct for buildrun cleanup +// BuildRetention struct for buildrun cleanup type BuildRetention struct { // FailedLimit defines the maximum number of failed buildruns that should exist. // @@ -232,16 +232,16 @@ type BuildRetention struct { // +optional // +kubebuilder:validation:Minimum=1 SucceededLimit *uint `json:"succeededLimit,omitempty"` - // TtlAfterFailed defines the maximum duration of time the failed buildrun should exist. + // TTLAfterFailed defines the maximum duration of time the failed buildrun should exist. // // +optional // +kubebuilder:validation:Format=duration - TtlAfterFailed *metav1.Duration `json:"ttlAfterFailed,omitempty"` - // TtlAfterFailed defines the maximum duration of time the succeeded buildrun should exist. + TTLAfterFailed *metav1.Duration `json:"ttlAfterFailed,omitempty"` + // TTLAfterSucceeded defines the maximum duration of time the succeeded buildrun should exist. // // +optional // +kubebuilder:validation:Format=duration - TtlAfterSucceeded *metav1.Duration `json:"ttlAfterSucceeded,omitempty"` + TTLAfterSucceeded *metav1.Duration `json:"ttlAfterSucceeded,omitempty"` } func init() { diff --git a/pkg/apis/build/v1alpha1/buildrun_types.go b/pkg/apis/build/v1alpha1/buildrun_types.go index 4a01e6e20d..9011576c3e 100644 --- a/pkg/apis/build/v1alpha1/buildrun_types.go +++ b/pkg/apis/build/v1alpha1/buildrun_types.go @@ -72,6 +72,10 @@ type BuildRunSpec struct { // // +optional Env []corev1.EnvVar `json:"env,omitempty"` + + // Contains information about retention params + // +optional + Retention *BuildRunRetention `json:"retention,omitempty"` } // BuildRunRequestedState defines the buildrun state the user can provide to override whatever is the current state. @@ -296,6 +300,20 @@ type Condition struct { Message string `json:"message" description:"human-readable message indicating details about last transition"` } +// BuildRunRetention struct for buildrun cleanup +type BuildRunRetention struct { + // TTLAfterFailed defines the maximum duration of time the failed buildrun should exist. + // + // +optional + // +kubebuilder:validation:Format=duration + TTLAfterFailed *metav1.Duration `json:"ttlAfterFailed,omitempty"` + // TTLAfterSucceeded defines the maximum duration of time the succeeded buildrun should exist.
+ // + // +optional + // +kubebuilder:validation:Format=duration + TTLAfterSucceeded *metav1.Duration `json:"ttlAfterSucceeded,omitempty"` +} + func init() { SchemeBuilder.Register(&BuildRun{}, &BuildRunList{}) } diff --git a/pkg/apis/build/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/build/v1alpha1/zz_generated.deepcopy.go index 98e5e3254c..a063404cc0 100644 --- a/pkg/apis/build/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/build/v1alpha1/zz_generated.deepcopy.go @@ -110,13 +110,13 @@ func (in *BuildRetention) DeepCopyInto(out *BuildRetention) { *out = new(uint) **out = **in } - if in.TtlAfterFailed != nil { - in, out := &in.TtlAfterFailed, &out.TtlAfterFailed + if in.TTLAfterFailed != nil { + in, out := &in.TTLAfterFailed, &out.TTLAfterFailed *out = new(v1.Duration) **out = **in } - if in.TtlAfterSucceeded != nil { - in, out := &in.TtlAfterSucceeded, &out.TtlAfterSucceeded + if in.TTLAfterSucceeded != nil { + in, out := &in.TTLAfterSucceeded, &out.TTLAfterSucceeded *out = new(v1.Duration) **out = **in } @@ -194,6 +194,32 @@ func (in *BuildRunList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildRunRetention) DeepCopyInto(out *BuildRunRetention) { + *out = *in + if in.TTLAfterFailed != nil { + in, out := &in.TTLAfterFailed, &out.TTLAfterFailed + *out = new(v1.Duration) + **out = **in + } + if in.TTLAfterSucceeded != nil { + in, out := &in.TTLAfterSucceeded, &out.TTLAfterSucceeded + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildRunRetention. +func (in *BuildRunRetention) DeepCopy() *BuildRunRetention { + if in == nil { + return nil + } + out := new(BuildRunRetention) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
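The generated deepcopy code above re-allocates every non-nil TTL pointer. The following standalone Go sketch, written against a simplified stand-in struct rather than the generated `v1alpha1` code, shows why the fresh allocation matters: without it, the copy would alias the original's memory and mutations would leak across.

```go
package main

import (
	"fmt"
	"time"
)

// runRetention is a simplified analogue of BuildRunRetention.
type runRetention struct {
	TTLAfterFailed *time.Duration
}

// DeepCopyInto mirrors the generated pattern: copy the value, then replace
// pointer fields with freshly allocated copies.
func (in *runRetention) DeepCopyInto(out *runRetention) {
	*out = *in
	if in.TTLAfterFailed != nil {
		d := *in.TTLAfterFailed
		out.TTLAfterFailed = &d // no aliasing of the original pointer
	}
}

func main() {
	d := time.Minute
	orig := runRetention{TTLAfterFailed: &d}
	var cp runRetention
	orig.DeepCopyInto(&cp)
	*cp.TTLAfterFailed = time.Hour    // mutate the copy...
	fmt.Println(*orig.TTLAfterFailed) // ...the original still prints 1m0s
}
```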
func (in *BuildRunSpec) DeepCopyInto(out *BuildRunSpec) { *out = *in @@ -248,6 +274,11 @@ func (in *BuildRunSpec) DeepCopyInto(out *BuildRunSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Retention != nil { + in, out := &in.Retention, &out.Retention + *out = new(BuildRunRetention) + (*in).DeepCopyInto(*out) + } return } diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index ea24555ccf..656f134b01 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -16,9 +16,9 @@ import ( "github.com/shipwright-io/build/pkg/config" "github.com/shipwright-io/build/pkg/ctxlog" "github.com/shipwright-io/build/pkg/reconciler/build" - "github.com/shipwright-io/build/pkg/reconciler/build_limit_cleanup" + "github.com/shipwright-io/build/pkg/reconciler/buildlimitcleanup" "github.com/shipwright-io/build/pkg/reconciler/buildrun" - "github.com/shipwright-io/build/pkg/reconciler/buildrun_ttl_cleanup" + "github.com/shipwright-io/build/pkg/reconciler/buildrunttlcleanup" "github.com/shipwright-io/build/pkg/reconciler/buildstrategy" "github.com/shipwright-io/build/pkg/reconciler/clusterbuildstrategy" ) @@ -65,11 +65,11 @@ func NewManager(ctx context.Context, config *config.Config, cfg *rest.Config, op return nil, err } - if err := build_limit_cleanup.Add(ctx, config, mgr); err != nil { + if err := buildlimitcleanup.Add(ctx, config, mgr); err != nil { return nil, err } - if err := buildrun_ttl_cleanup.Add(ctx, config, mgr); err != nil { + if err := buildrunttlcleanup.Add(ctx, config, mgr); err != nil { return nil, err } diff --git a/pkg/reconciler/build_limit_cleanup/build_limit_cleanup.go b/pkg/reconciler/buildlimitcleanup/build_limit_cleanup.go similarity index 58% rename from pkg/reconciler/build_limit_cleanup/build_limit_cleanup.go rename to pkg/reconciler/buildlimitcleanup/build_limit_cleanup.go index 4365d744a8..69bca8008a 100644 --- a/pkg/reconciler/build_limit_cleanup/build_limit_cleanup.go +++ b/pkg/reconciler/buildlimitcleanup/build_limit_cleanup.go @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: Apache-2.0 -package build_limit_cleanup +package buildlimitcleanup import ( "context" @@ -14,7 +14,6 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -22,25 +21,21 @@ // ReconcileBuild reconciles a Build object type ReconcileBuild struct { - /* This client, initialized using mgr.Client() above, is a split client - that reads objects from the cache and writes to the apiserver */ - config *config.Config - client client.Client - scheme *runtime.Scheme - setOwnerReferenceFunc setOwnerReferenceFunc + // This client, initialized using mgr.Client() above, is a split client + // that reads objects from the cache and writes to the apiserver + config *config.Config + client client.Client } -func NewReconciler(c *config.Config, mgr manager.Manager, ownerRef setOwnerReferenceFunc) reconcile.Reconciler { +func NewReconciler(c *config.Config, mgr manager.Manager) reconcile.Reconciler { return &ReconcileBuild{ - config: c, - client: mgr.GetClient(), - scheme: mgr.GetScheme(), - setOwnerReferenceFunc: ownerRef, + config: c, + client: mgr.GetClient(), } } -/* Reconciler finds retentions fields in builds and makes sure the - number of corresponding buildruns adhere to these limits */ +// Reconciler finds retention fields in
builds and makes sure the +// corresponding buildruns adhere to these limits func (r *ReconcileBuild) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { // Set the ctx to be Background, as the top-level context for incoming requests. ctx, cancel := context.WithTimeout(ctx, r.config.CtxTimeOut) defer cancel() @@ -59,6 +54,16 @@ func (r *ReconcileBuild) Reconcile(ctx context.Context, request reconcile.Reques return reconcile.Result{}, err } + // early exit if there is no retention section + if b.Spec.Retention == nil { + return reconcile.Result{}, nil + } + + // early exit if retention section has no limit + if b.Spec.Retention.SucceededLimit == nil && b.Spec.Retention.FailedLimit == nil { + return reconcile.Result{}, nil + } + lbls := map[string]string{ build.LabelBuild: b.Name, } @@ -67,7 +72,12 @@ func (r *ReconcileBuild) Reconcile(ctx context.Context, request reconcile.Reques LabelSelector: labels.SelectorFromSet(lbls), } allBuildRuns := &build.BuildRunList{} - r.client.List(ctx, allBuildRuns, &opts) + + err = r.client.List(ctx, allBuildRuns, &opts) + if err != nil { + return reconcile.Result{}, err + } + if len(allBuildRuns.Items) == 0 { return reconcile.Result{}, nil } @@ -90,19 +100,20 @@ func (r *ReconcileBuild) Reconcile(ctx context.Context, request reconcile.Reques // Check limits and delete oldest buildruns if limit is reached. if b.Spec.Retention.SucceededLimit != nil { if len(buildRunSucceeded) > int(*b.Spec.Retention.SucceededLimit) { + // Sort buildruns with oldest one at the beginning sort.Slice(buildRunSucceeded, func(i, j int) bool { - return buildRunSucceeded[i].Status.CompletionTime.Before(buildRunSucceeded[j].Status.CompletionTime) + return buildRunSucceeded[i].ObjectMeta.CreationTimestamp.Before(&buildRunSucceeded[j].ObjectMeta.CreationTimestamp) }) lenOfList := len(buildRunSucceeded) - for i := 0; lenOfList-i > int(*b.Spec.Retention.SucceededLimit); i += 1 { + for i := 0; i < lenOfList-int(*b.Spec.Retention.SucceededLimit); i++ { ctxlog.Info(ctx, "Deleting succeeded buildrun as cleanup limit has been reached.", namespace, request.Namespace, name, buildRunSucceeded[i].Name) - deleteBuildRunErr := r.client.Delete(ctx, &buildRunSucceeded[i], &client.DeleteOptions{}) - if deleteBuildRunErr != nil { - if apierrors.IsNotFound(deleteBuildRunErr) { - return reconcile.Result{}, nil + err := r.client.Delete(ctx, &buildRunSucceeded[i], &client.DeleteOptions{}) + if err != nil { + if !apierrors.IsNotFound(err) { + ctxlog.Debug(ctx, "Error deleting buildRun.", namespace, request.Namespace, name, &buildRunSucceeded[i].Name, "error", err) + return reconcile.Result{}, err } - ctxlog.Debug(ctx, "Error deleting buildRun.", namespace, request.Namespace, name, &buildRunSucceeded[i].Name, deleteError, deleteBuildRunErr) - return reconcile.Result{}, nil + ctxlog.Debug(ctx, "Error deleting buildRun.
It has already been deleted.", namespace, request.Namespace, name, &buildRunSucceeded[i].Name) } } } @@ -110,19 +121,20 @@ func (r *ReconcileBuild) Reconcile(ctx context.Context, request reconcile.Reques if b.Spec.Retention.FailedLimit != nil { if len(buildRunFailed) > int(*b.Spec.Retention.FailedLimit) { + // Sort buildruns with oldest one at the beginning sort.Slice(buildRunFailed, func(i, j int) bool { - return buildRunFailed[i].Status.CompletionTime.Before(buildRunFailed[j].Status.CompletionTime) + return buildRunFailed[i].ObjectMeta.CreationTimestamp.Before(&buildRunFailed[j].ObjectMeta.CreationTimestamp) }) lenOfList := len(buildRunFailed) - for i := 0; lenOfList-i > int(*b.Spec.Retention.FailedLimit); i += 1 { + for i := 0; i < lenOfList-int(*b.Spec.Retention.FailedLimit); i++ { ctxlog.Info(ctx, "Deleting failed buildrun as cleanup limit has been reached.", namespace, request.Namespace, name, buildRunFailed[i].Name) - deleteBuildRunErr := r.client.Delete(ctx, &buildRunFailed[i], &client.DeleteOptions{}) - if deleteBuildRunErr != nil { - if apierrors.IsNotFound(deleteBuildRunErr) { - return reconcile.Result{}, nil + err := r.client.Delete(ctx, &buildRunFailed[i], &client.DeleteOptions{}) + if err != nil { + if !apierrors.IsNotFound(err) { + ctxlog.Debug(ctx, "Error deleting buildRun.", namespace, request.Namespace, name, &buildRunFailed[i].Name, "error", err) + return reconcile.Result{}, err } - ctxlog.Debug(ctx, "Error deleting buildRun.", namespace, request.Namespace, name, buildRunFailed[i].Name, deleteError, deleteBuildRunErr) - return reconcile.Result{}, nil + ctxlog.Debug(ctx, "Error deleting buildRun. It has already been deleted.", namespace, request.Namespace, name, &buildRunFailed[i].Name) } } } diff --git a/pkg/reconciler/build_limit_cleanup/controller.go b/pkg/reconciler/buildlimitcleanup/controller.go similarity index 64% rename from pkg/reconciler/build_limit_cleanup/controller.go rename to pkg/reconciler/buildlimitcleanup/controller.go index 9e85c70660..77e72ee70e 100644 --- a/pkg/reconciler/build_limit_cleanup/controller.go +++ b/pkg/reconciler/buildlimitcleanup/controller.go @@ -2,22 +2,18 @@ // // SPDX-License-Identifier: Apache-2.0 -package build_limit_cleanup +package buildlimitcleanup import ( "context" - build "github.com/shipwright-io/build/pkg/apis/build/v1alpha1" buildv1alpha1 "github.com/shipwright-io/build/pkg/apis/build/v1alpha1" "github.com/shipwright-io/build/pkg/config" "github.com/shipwright-io/build/pkg/ctxlog" corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" @@ -27,20 +23,16 @@ import ( ) const ( - namespace string = "namespace" - name string = "name" - deleteError string = "error" + namespace string = "namespace" + name string = "name" ) -type setOwnerReferenceFunc func(owner, object metav1.Object, scheme *runtime.Scheme) error - -// Add creates a new Build Controller and adds it to the Manager. The Manager will set fields on the Controller -// and Start it when the Manager is Started. +// Add creates a new build_limit_cleanup Controller and adds it to the Manager. 
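The retention-limit reconciler above sorts the completed buildruns so the oldest creation timestamp comes first, then deletes from index 0 until the count is back under the limit. Here is a compact, self-contained Go sketch of that sort-and-prune pattern, using a hypothetical `run` struct in place of the real BuildRun type:

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

// run is a hypothetical stand-in for a completed BuildRun.
type run struct {
	name    string
	created time.Time
}

// prune returns the names that exceed the retention limit, oldest first,
// which is the set the reconciler would delete.
func prune(runs []run, limit int) []string {
	if len(runs) <= limit {
		return nil
	}
	// Sort buildruns with the oldest one at the beginning.
	sort.Slice(runs, func(i, j int) bool { return runs[i].created.Before(runs[j].created) })
	var victims []string
	for i := 0; i < len(runs)-limit; i++ {
		victims = append(victims, runs[i].name)
	}
	return victims
}

func main() {
	now := time.Now()
	runs := []run{
		{"br-3", now},
		{"br-1", now.Add(-2 * time.Hour)},
		{"br-2", now.Add(-time.Hour)},
	}
	fmt.Println(prune(runs, 1)) // [br-1 br-2]
}
```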
The Manager will set fields on the Controller +// and Start it when the Manager is Started func Add(ctx context.Context, c *config.Config, mgr manager.Manager) error { ctx = ctxlog.NewContext(ctx, "build-limit-cleanup-controller") - return add(ctx, mgr, NewReconciler(c, mgr, controllerutil.SetControllerReference), c.Controllers.Build.MaxConcurrentReconciles) + return add(ctx, mgr, NewReconciler(c, mgr), c.Controllers.Build.MaxConcurrentReconciles) } - func add(ctx context.Context, mgr manager.Manager, r reconcile.Reconciler, maxConcurrentReconciles int) error { // Create the controller options options := controller.Options{ @@ -64,7 +56,26 @@ func add(ctx context.Context, mgr manager.Manager, r reconcile.Reconciler, maxCo }, UpdateFunc: func(e event.UpdateEvent) bool { n := e.ObjectNew.(*buildv1alpha1.Build) - return n.Spec.Retention != nil && (n.Spec.Retention.FailedLimit != nil || n.Spec.Retention.SucceededLimit != nil) + o := e.ObjectOld.(*buildv1alpha1.Build) + + // Check to see if there are new retention parameters or whether the + // limit values have decreased + if o.Spec.Retention == nil && n.Spec.Retention != nil { + if n.Spec.Retention.FailedLimit != nil || n.Spec.Retention.SucceededLimit != nil { + return true + } + } else if n.Spec.Retention != nil && o.Spec.Retention != nil { + if n.Spec.Retention.FailedLimit != nil && o.Spec.Retention.FailedLimit == nil { + return true + } else if n.Spec.Retention.SucceededLimit != nil && o.Spec.Retention.SucceededLimit == nil { + return true + } else if n.Spec.Retention.FailedLimit != nil && o.Spec.Retention.FailedLimit != nil && int(*n.Spec.Retention.FailedLimit) < int(*o.Spec.Retention.FailedLimit) { + return true + } else if n.Spec.Retention.SucceededLimit != nil && o.Spec.Retention.SucceededLimit != nil && int(*n.Spec.Retention.SucceededLimit) < int(*o.Spec.Retention.SucceededLimit) { + return true + } + } + return false }, DeleteFunc: func(e event.DeleteEvent) bool { // Never reconcile on deletion, there is nothing we have to do @@ -80,16 +91,19 @@ func add(ctx context.Context, mgr manager.Manager, r reconcile.Reconciler, maxCo // Reconcile the build the related buildrun has just completed UpdateFunc: func(e event.UpdateEvent) bool { n := e.ObjectNew.(*buildv1alpha1.BuildRun) + + // check if Buildrun is related to a build + if n.Spec.BuildRef == nil { + return false + } + o := e.ObjectOld.(*buildv1alpha1.BuildRun) oldCondition := o.Status.GetCondition(buildv1alpha1.Succeeded) newCondition := n.Status.GetCondition(buildv1alpha1.Succeeded) - if oldCondition != nil && newCondition != nil { - if (oldCondition.Status == corev1.ConditionUnknown) && + if newCondition != nil { + if (oldCondition == nil || oldCondition.Status == corev1.ConditionUnknown) && (newCondition.Status == corev1.ConditionFalse || newCondition.Status == corev1.ConditionTrue) { - if n.Status.BuildSpec != nil && n.Status.BuildSpec.Retention != nil && - (n.Status.BuildSpec.Retention.FailedLimit != nil || n.Status.BuildSpec.Retention.SucceededLimit != nil) { - return true - } + return true } } return false @@ -101,17 +115,13 @@ func add(ctx context.Context, mgr manager.Manager, r reconcile.Reconciler, maxCo } // Watch for changes to primary resource Build - if err = c.Watch(&source.Kind{Type: &build.Build{}}, &handler.EnqueueRequestForObject{}, pred); err != nil { + if err = c.Watch(&source.Kind{Type: &buildv1alpha1.Build{}}, &handler.EnqueueRequestForObject{}, pred); err != nil { return err } // Watch for changes to resource BuildRun return c.Watch(&source.Kind{Type: 
&buildv1alpha1.BuildRun{}}, handler.EnqueueRequestsFromMapFunc(func(o client.Object) []reconcile.Request { buildRun := o.(*buildv1alpha1.BuildRun) - // check if Buildrun is related to a build - if buildRun.Spec.BuildRef.Name == "" { - return []reconcile.Request{} - } return []reconcile.Request{ { diff --git a/pkg/reconciler/buildrun_ttl_cleanup/buildrun_ttl_cleanup.go b/pkg/reconciler/buildrun_ttl_cleanup/buildrun_ttl_cleanup.go deleted file mode 100644 index cca156dcb6..0000000000 --- a/pkg/reconciler/buildrun_ttl_cleanup/buildrun_ttl_cleanup.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright The Shipwright Contributors -// -// SPDX-License-Identifier: Apache-2.0 - -package buildrun_ttl_cleanup - -import ( - "context" - "time" - - buildv1alpha1 "github.com/shipwright-io/build/pkg/apis/build/v1alpha1" - "github.com/shipwright-io/build/pkg/config" - "github.com/shipwright-io/build/pkg/ctxlog" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -// ReconcileBuildRun reconciles a BuildRun object - -type ReconcileBuildRun struct { - /* This client, initialized using mgr.Client() above, is a split client - that reads objects from the cache and writes to the apiserver */ - config *config.Config - client client.Client - scheme *runtime.Scheme - setOwnerReferenceFunc setOwnerReferenceFunc -} - -func NewReconciler(c *config.Config, mgr manager.Manager, ownerRef setOwnerReferenceFunc) reconcile.Reconciler { - return &ReconcileBuildRun{ - config: c, - client: mgr.GetClient(), - scheme: mgr.GetScheme(), - setOwnerReferenceFunc: ownerRef, - } -} - -// GetBuildRunObject retrieves an existing BuildRun based on a name and namespace -func (r *ReconcileBuildRun) GetBuildRunObject(ctx context.Context, objectName string, objectNS string, buildRun *buildv1alpha1.BuildRun) error { - if err := r.client.Get(ctx, types.NamespacedName{Name: objectName, Namespace: objectNS}, buildRun); err != nil { - return err - } - return nil -} - -/* Reconciler makes sure the buildrun adheres to its ttl retention field and deletes it - once the ttl limit is hit */ -func (r *ReconcileBuildRun) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - // Set the ctx to be Background, as the top-level context for incoming requests.if - ctx, cancel := context.WithTimeout(ctx, r.config.CtxTimeOut) - defer cancel() - - ctxlog.Debug(ctx, "Start reconciling Buildrun-ttl", namespace, request.Namespace, name, request.Name) - - br := &buildv1alpha1.BuildRun{} - err := r.GetBuildRunObject(ctx, request.Name, request.Namespace, br) - if err != nil { - if apierrors.IsNotFound(err) { - ctxlog.Debug(ctx, "Finish reconciling buildrun-ttl. 
Buildrun was not found", namespace, request.Namespace, name, request.Name) - return reconcile.Result{}, nil - } - return reconcile.Result{}, err - } - - condition := br.Status.GetCondition(buildv1alpha1.Succeeded) - if condition == nil { - return reconcile.Result{}, nil - } - - /* In case ttl has been reached, delete the buildrun, if not, - calculate the remaining time and requeue the buildrun */ - switch condition.Status { - - case corev1.ConditionTrue: - if br.Status.BuildSpec.Retention.TtlAfterSucceeded != nil { - if br.Status.CompletionTime.Add(br.Status.BuildSpec.Retention.TtlAfterSucceeded.Duration).Before(time.Now()) { - ctxlog.Info(ctx, "Deleting successful buildrun as ttl has been reached.", namespace, request.Namespace, name, request.Name) - deleteBuildRunErr := r.client.Delete(ctx, br, &client.DeleteOptions{}) - if deleteBuildRunErr != nil { - if apierrors.IsNotFound(deleteBuildRunErr) { - return reconcile.Result{}, nil - } - ctxlog.Debug(ctx, "Error deleting buildRun.", namespace, request.Namespace, name, br.Name, deleteError, deleteBuildRunErr) - return reconcile.Result{}, deleteBuildRunErr - } - } else { - timeLeft := br.Status.CompletionTime.Add(br.Status.BuildSpec.Retention.TtlAfterSucceeded.Duration).Sub(time.Now()) - return reconcile.Result{Requeue: true, RequeueAfter: timeLeft}, nil - } - } - - case corev1.ConditionFalse: - if br.Status.BuildSpec.Retention.TtlAfterFailed != nil { - if br.Status.CompletionTime.Add(br.Status.BuildSpec.Retention.TtlAfterFailed.Duration).Before(time.Now()) { - ctxlog.Info(ctx, "Deleting failed buildrun as ttl has been reached.", namespace, request.Namespace, name, request.Name) - deleteBuildRunErr := r.client.Delete(ctx, br, &client.DeleteOptions{}) - if deleteBuildRunErr != nil { - if apierrors.IsNotFound(deleteBuildRunErr) { - return reconcile.Result{}, nil - } - ctxlog.Debug(ctx, "Error deleting buildRun.", namespace, request.Namespace, name, br.Name, deleteError, deleteBuildRunErr) - return reconcile.Result{}, deleteBuildRunErr - } - } else { - timeLeft := br.Status.CompletionTime.Add(br.Status.BuildSpec.Retention.TtlAfterFailed.Duration).Sub(time.Now()) - return reconcile.Result{Requeue: true, RequeueAfter: timeLeft}, nil - } - } - } - ctxlog.Debug(ctx, "Finishing reconciling request from a BuildRun event", namespace, request.Namespace, name, request.Name) - return reconcile.Result{}, nil -} diff --git a/pkg/reconciler/buildrun_ttl_cleanup/controller.go b/pkg/reconciler/buildrun_ttl_cleanup/controller.go deleted file mode 100644 index f36e0b8013..0000000000 --- a/pkg/reconciler/buildrun_ttl_cleanup/controller.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright The Shipwright Contributors -// -// SPDX-License-Identifier: Apache-2.0 - -package buildrun_ttl_cleanup - -import ( - "context" - - buildv1alpha1 "github.com/shipwright-io/build/pkg/apis/build/v1alpha1" - "github.com/shipwright-io/build/pkg/config" - "github.com/shipwright-io/build/pkg/ctxlog" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" -) - -const ( - namespace string = "namespace" - name string = "name" - deleteError string = "error" -) - -type 
setOwnerReferenceFunc func(owner, object metav1.Object, scheme *runtime.Scheme) error - -// Add creates a new BuildRun Controller and adds it to the Manager. The Manager will set fields on the Controller -// and Start it when the Manager is Started. -func Add(ctx context.Context, c *config.Config, mgr manager.Manager) error { - ctx = ctxlog.NewContext(ctx, "buildrun-ttl-cleanup-controller") - return add(ctx, mgr, NewReconciler(c, mgr, controllerutil.SetControllerReference), c.Controllers.BuildRun.MaxConcurrentReconciles) -} - -func add(ctx context.Context, mgr manager.Manager, r reconcile.Reconciler, maxConcurrentReconciles int) error { - // Create the controller options - options := controller.Options{ - Reconciler: r, - } - - if maxConcurrentReconciles > 0 { - options.MaxConcurrentReconciles = maxConcurrentReconciles - - } - - c, err := controller.New("buildrun-ttl-cleanup-controller", mgr, options) - if err != nil { - return err - } - - predBuildRun := predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { - - o := e.Object.(*buildv1alpha1.BuildRun) - - if (o.Status.BuildSpec != nil) && (o.Status.BuildSpec.Retention != nil) && - (o.Status.BuildSpec.Retention.TtlAfterFailed != nil || o.Status.BuildSpec.Retention.SucceededLimit != nil) { - return true - } - return false - }, - UpdateFunc: func(e event.UpdateEvent) bool { - o := e.ObjectNew.(*buildv1alpha1.BuildRun) - - if (o.Status.BuildSpec != nil) && (o.Status.BuildSpec.Retention != nil) && - (o.Status.BuildSpec.Retention.TtlAfterFailed != nil || o.Status.BuildSpec.Retention.SucceededLimit != nil) { - return true - } - return false - }, - DeleteFunc: func(e event.DeleteEvent) bool { - // Never reconcile on deletion, there is nothing we have to do - return false - }, - } - // Watch for changes to primary resource BuildRun - if err = c.Watch(&source.Kind{Type: &buildv1alpha1.BuildRun{}}, &handler.EnqueueRequestForObject{}, predBuildRun); err != nil { - return err - } - return nil -} diff --git a/pkg/reconciler/buildrunttlcleanup/buildrun_ttl_cleanup.go b/pkg/reconciler/buildrunttlcleanup/buildrun_ttl_cleanup.go new file mode 100644 index 0000000000..b1f581e7fd --- /dev/null +++ b/pkg/reconciler/buildrunttlcleanup/buildrun_ttl_cleanup.go @@ -0,0 +1,99 @@ +// Copyright The Shipwright Contributors +// +// SPDX-License-Identifier: Apache-2.0 + +package buildrunttlcleanup + +import ( + "context" + "time" + + buildv1alpha1 "github.com/shipwright-io/build/pkg/apis/build/v1alpha1" + "github.com/shipwright-io/build/pkg/config" + "github.com/shipwright-io/build/pkg/ctxlog" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// ReconcileBuildRun reconciles a BuildRun object +type ReconcileBuildRun struct { + // This client, initialized using mgr.Client() above, is a split client + // that reads objects from the cache and writes to the apiserver + config *config.Config + client client.Client +} + +func NewReconciler(c *config.Config, mgr manager.Manager) reconcile.Reconciler { + return &ReconcileBuildRun{ + config: c, + client: mgr.GetClient(), + } +} + +// Reconcile makes sure the buildrun adheres to its ttl retention field and deletes it +// once the ttl limit is hit +func (r *ReconcileBuildRun) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + // 
Set the ctx to be Background, as the top-level context for incoming requests. + ctx, cancel := context.WithTimeout(ctx, r.config.CtxTimeOut) + defer cancel() + + ctxlog.Debug(ctx, "Start reconciling Buildrun-ttl", namespace, request.Namespace, name, request.Name) + + br := &buildv1alpha1.BuildRun{} + err := r.client.Get(ctx, types.NamespacedName{Name: request.Name, Namespace: request.Namespace}, br) + if err != nil { + if apierrors.IsNotFound(err) { + ctxlog.Debug(ctx, "Finish reconciling buildrun-ttl. Buildrun was not found", namespace, request.Namespace, name, request.Name) + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + condition := br.Status.GetCondition(buildv1alpha1.Succeeded) + if condition == nil || condition.Status == corev1.ConditionUnknown { + return reconcile.Result{}, nil + } + var ttl *metav1.Duration + if condition.Status == corev1.ConditionTrue { + if br.Spec.Retention != nil && br.Spec.Retention.TTLAfterSucceeded != nil { + ttl = br.Spec.Retention.TTLAfterSucceeded + } else if br.Status.BuildSpec != nil && br.Status.BuildSpec.Retention != nil && br.Status.BuildSpec.Retention.TTLAfterSucceeded != nil { + ttl = br.Status.BuildSpec.Retention.TTLAfterSucceeded + } + } else { + if br.Spec.Retention != nil && br.Spec.Retention.TTLAfterFailed != nil { + ttl = br.Spec.Retention.TTLAfterFailed + } else if br.Status.BuildSpec != nil && br.Status.BuildSpec.Retention != nil && br.Status.BuildSpec.Retention.TTLAfterFailed != nil { + ttl = br.Status.BuildSpec.Retention.TTLAfterFailed + } + } + + // check if BuildRun still has a TTL + if ttl == nil { + return reconcile.Result{}, nil + } + + if br.Status.CompletionTime.Add(ttl.Duration).Before(time.Now()) { + ctxlog.Info(ctx, "Deleting buildrun as ttl has been reached.", namespace, request.Namespace, name, request.Name) + err := r.client.Delete(ctx, br, &client.DeleteOptions{}) + if err != nil { + if !apierrors.IsNotFound(err) { + ctxlog.Debug(ctx, "Error deleting buildRun.", namespace, request.Namespace, name, br.Name, "error", err) + return reconcile.Result{}, err + } + ctxlog.Debug(ctx, "Error deleting buildRun. 
It has already been deleted.", namespace, request.Namespace, name, br.Name) + return reconcile.Result{}, nil + } + } else { + timeLeft := time.Until(br.Status.CompletionTime.Add(ttl.Duration)) + return reconcile.Result{Requeue: true, RequeueAfter: timeLeft}, nil + } + + ctxlog.Debug(ctx, "Finishing reconciling request from a BuildRun event", namespace, request.Namespace, name, request.Name) + return reconcile.Result{}, nil +} diff --git a/pkg/reconciler/buildrunttlcleanup/controller.go b/pkg/reconciler/buildrunttlcleanup/controller.go new file mode 100644 index 0000000000..46568672e8 --- /dev/null +++ b/pkg/reconciler/buildrunttlcleanup/controller.go @@ -0,0 +1,146 @@ +// Copyright The Shipwright Contributors +// +// SPDX-License-Identifier: Apache-2.0 + +package buildrunttlcleanup + +import ( + "context" + + buildv1alpha1 "github.com/shipwright-io/build/pkg/apis/build/v1alpha1" + "github.com/shipwright-io/build/pkg/config" + "github.com/shipwright-io/build/pkg/ctxlog" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +const ( + namespace string = "namespace" + name string = "name" +) + +// Add creates a new BuildRun_ttl_cleanup Controller and adds it to the Manager. The Manager will set fields on the Controller +// and Start it when the Manager is Started. +func Add(ctx context.Context, c *config.Config, mgr manager.Manager) error { + ctx = ctxlog.NewContext(ctx, "buildrun-ttl-cleanup-controller") + return add(ctx, mgr, NewReconciler(c, mgr), c.Controllers.BuildRun.MaxConcurrentReconciles) +} + +// reconcileCompletedBuildRun returns true if the object has the required TTL parameters +func reconcileCompletedBuildRun(condition *buildv1alpha1.Condition, o *buildv1alpha1.BuildRun) bool { + if condition.Status == corev1.ConditionTrue { + // check if a successful BuildRun has a TTL after succeeded value set + if o.Spec.Retention != nil && o.Spec.Retention.TTLAfterSucceeded != nil { + return true + } + + if o.Status.BuildSpec != nil && o.Status.BuildSpec.Retention != nil && o.Status.BuildSpec.Retention.TTLAfterSucceeded != nil { + return true + } + } else { + // check if a failed BuildRun has a TTL after failed + if o.Spec.Retention != nil && o.Spec.Retention.TTLAfterFailed != nil { + return true + } + + if o.Status.BuildSpec != nil && o.Status.BuildSpec.Retention != nil && o.Status.BuildSpec.Retention.TTLAfterFailed != nil { + return true + } + } + return false +} + +// reconcileAlreadyCompletedBuildRun returns true only if the TTL limit was introduced +// or if it was lowered as the object was completed before the update +func reconcileAlreadyCompletedBuildRun(newCondition *buildv1alpha1.Condition, oldCondition *buildv1alpha1.Condition, n *buildv1alpha1.BuildRun, o *buildv1alpha1.BuildRun) bool { + if newCondition.Status == corev1.ConditionTrue { + // check if a successful BuildRun has a TTL that was lowered or introduced + if (o.Spec.Retention == nil || o.Spec.Retention.TTLAfterSucceeded == nil) && n.Spec.Retention != nil && n.Spec.Retention.TTLAfterSucceeded != nil { + return true + } + + if o.Spec.Retention != nil && o.Spec.Retention.TTLAfterSucceeded != nil && n.Spec.Retention != nil && n.Spec.Retention.TTLAfterSucceeded != nil && n.Spec.Retention.TTLAfterSucceeded.Duration < 
o.Spec.Retention.TTLAfterSucceeded.Duration { + return true + } + } else { + // check if a failed BuildRun has a TTL that was lowered or introduced + if (o.Spec.Retention == nil || o.Spec.Retention.TTLAfterFailed == nil) && n.Spec.Retention != nil && n.Spec.Retention.TTLAfterFailed != nil { + return true + } + + if o.Spec.Retention != nil && o.Spec.Retention.TTLAfterFailed != nil && n.Spec.Retention != nil && n.Spec.Retention.TTLAfterFailed != nil && n.Spec.Retention.TTLAfterFailed.Duration < o.Spec.Retention.TTLAfterFailed.Duration { + return true + } + } + return false +} + +func add(ctx context.Context, mgr manager.Manager, r reconcile.Reconciler, maxConcurrentReconciles int) error { + // Create the controller options + options := controller.Options{ + Reconciler: r, + } + + if maxConcurrentReconciles > 0 { + options.MaxConcurrentReconciles = maxConcurrentReconciles + + } + + c, err := controller.New("buildrun-ttl-cleanup-controller", mgr, options) + if err != nil { + return err + } + + predBuildRun := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + // Reconcile if TTL values are set + o := e.Object.(*buildv1alpha1.BuildRun) + + // ignore a running BuildRun + condition := o.Status.GetCondition(buildv1alpha1.Succeeded) + if condition == nil || condition.Status == corev1.ConditionUnknown { + return false + } + + return reconcileCompletedBuildRun(condition, o) + }, + UpdateFunc: func(e event.UpdateEvent) bool { + // check if the updated object is completed + n := e.ObjectNew.(*buildv1alpha1.BuildRun) + newCondition := n.Status.GetCondition(buildv1alpha1.Succeeded) + if newCondition == nil || newCondition.Status == corev1.ConditionUnknown { + return false + } + + o := e.ObjectOld.(*buildv1alpha1.BuildRun) + oldCondition := o.Status.GetCondition(buildv1alpha1.Succeeded) + + // for objects that just completed (failed or succeeded), check if a matching TTL is set + if oldCondition == nil || oldCondition.Status == corev1.ConditionUnknown { + return reconcileCompletedBuildRun(newCondition, n) + } + + // for objects that were already complete, check if the TTL was lowered or introduced + if oldCondition != nil && oldCondition.Status != corev1.ConditionUnknown { + return reconcileAlreadyCompletedBuildRun(newCondition, oldCondition, n, o) + } + + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + // Never reconcile on deletion, there is nothing we have to do + return false + }, + } + // Watch for changes to primary resource BuildRun + if err = c.Watch(&source.Kind{Type: &buildv1alpha1.BuildRun{}}, &handler.EnqueueRequestForObject{}, predBuildRun); err != nil { + return err + } + return nil +} diff --git a/test/build_samples.go b/test/build_samples.go index 03d357de99..120da8a822 100644 --- a/test/build_samples.go +++ b/test/build_samples.go @@ -566,3 +566,23 @@ spec: failedLimit: 1 succeededLimit: 2 ` + +// MinimalBuildWithRetentionTTLOneMin defines a simple +// Build with a source, a strategy and a one-minute retention ttl +const MinimalBuildWithRetentionTTLOneMin = ` +apiVersion: shipwright.io/v1alpha1 +kind: Build +metadata: + name: build-retention-ttl +spec: + source: + url: "https://github.com/shipwright-io/sample-go" + contextDir: docker-build + strategy: + kind: ClusterBuildStrategy + output: + image: image-registry.openshift-image-registry.svc:5000/example/buildpacks-app + retention: + ttlAfterFailed: 1m + ttlAfterSucceeded: 1m +` diff --git a/test/buildrun_samples.go b/test/buildrun_samples.go index 7660f61de7..f583f36919 100644 --- a/test/buildrun_samples.go +++ b/test/buildrun_samples.go @@ -212,3
+212,32 @@ spec: buildRef: name: build-retention-ttl ` + +// MinimalBuildRunRetentionTTLFive defines a minimal BuildRun +// with a five-second retention ttl used to test retention fields +const MinimalBuildRunRetentionTTLFive = ` +apiVersion: shipwright.io/v1alpha1 +kind: BuildRun +metadata: + name: buildrun-retention-ttl +spec: + buildRef: + name: build-retention-ttl + retention: + ttlAfterFailed: 5s + ttlAfterSucceeded: 5s +` + +// MinimalBuildahBuildRunWithExitCode defines a BuildRun that sets the strategy's exit-command parameter +const MinimalBuildahBuildRunWithExitCode = ` +apiVersion: shipwright.io/v1alpha1 +kind: BuildRun +metadata: + name: buildah-run +spec: + paramValues: + - name: exit-command + value: "true" + buildRef: + name: buildah +` diff --git a/test/clusterbuildstrategy_samples.go b/test/clusterbuildstrategy_samples.go index a9593b0a0c..77d23d7d86 100644 --- a/test/clusterbuildstrategy_samples.go +++ b/test/clusterbuildstrategy_samples.go @@ -217,6 +217,10 @@ kind: ClusterBuildStrategy metadata: name: noop spec: + parameters: + - name: exit-command + description: "Exit command for the pod" + default: "true" buildSteps: - name: step-no-and-op image: alpine:latest @@ -240,7 +244,7 @@ spec: - name: AWS_SECRET_KEY value: NOT_SET command: - - "true" + - $(params.exit-command) resources: limits: cpu: 250m @@ -344,35 +348,3 @@ spec: args: - $(params.sleep-time) ` - -// Use a simple strategy that does not push the image -const ClusterBuildStrategySingleStepNoPush = ` -apiVersion: shipwright.io/v1alpha1 -kind: ClusterBuildStrategy -metadata: - name: buildah -spec: - buildSteps: - - name: buildah-bud - image: quay.io/containers/buildah:v1.20.1 - workingDir: $(params.shp-source-root) - securityContext: - privileged: true - command: - - /usr/bin/buildah - args: - - bud - - --tag=$(params.shp-output-image) - - --file=$(build.dockerfile) - - $(params.shp-source-context) - resources: - limits: - cpu: 500m - memory: 1Gi - requests: - cpu: 250m - memory: 65Mi - volumeMounts: - - name: buildah-images - mountPath: /var/lib/containers/storage -` diff --git a/test/integration/buildrun_cleanup_test.go b/test/integration/buildrun_cleanup_test.go index 7cede008f3..51c587ad6d 100644 --- a/test/integration/buildrun_cleanup_test.go +++ b/test/integration/buildrun_cleanup_test.go @@ -17,7 +17,6 @@ import ( var _ = Describe("Integration tests for retention limits and ttls for succeeded buildruns.", func() { var ( cbsObject *v1alpha1.ClusterBuildStrategy - cbsObjectFail *v1alpha1.ClusterBuildStrategy buildObject *v1alpha1.Build buildRunObject *v1alpha1.BuildRun buildSample []byte @@ -74,7 +73,78 @@ var _ = Describe("Integration tests for retention limits and ttls for succeeded br, err := tb.GetBRTillCompletion(buildRunObject.Name) Expect(err).To(BeNil()) Expect(br.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionTrue)) - br, err = tb.GetBRTillNotFound(buildRunObject.Name, time.Second*1, time.Minute) + _, err = tb.GetBRTillNotFound(buildRunObject.Name, time.Second*1, time.Second*15) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + }) + + Context("When a buildrun with short ttl set succeeds.
TTL field exists in the buildrun spec", func() { + + BeforeEach(func() { + buildSample = []byte(test.MinimalBuildWithRetentionLimitOne) + buildRunSample = []byte(test.MinimalBuildRunRetentionTTLFive) + }) + + It("Should not find the buildrun a few seconds after it succeeds", func() { + + Expect(tb.CreateBuild(buildObject)).To(BeNil()) + + buildObject, err = tb.GetBuildTillValidation(buildObject.Name) + Expect(err).To(BeNil()) + + Expect(tb.CreateBR(buildRunObject)).To(BeNil()) + br, err := tb.GetBRTillCompletion(buildRunObject.Name) + Expect(err).To(BeNil()) + Expect(br.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionTrue)) + _, err = tb.GetBRTillNotFound(buildRunObject.Name, time.Second*1, time.Second*15) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + }) + + Context("When a buildrun with short ttl set fails because the buildRef is not found. TTL field exists in the buildrun spec", func() { + + BeforeEach(func() { + buildSample = []byte(test.MinimalBuildWithRetentionLimitOne) + buildRunSample = []byte(test.MinimalBuildRunRetentionTTLFive) + }) + + It("Should not find the buildrun a few seconds after it fails", func() { + + buildRunObject.Spec.BuildRef.Name = "non-existent-buildref" + Expect(tb.CreateBR(buildRunObject)).To(BeNil()) + br, err := tb.GetBRTillCompletion(buildRunObject.Name) + Expect(err).To(BeNil()) + Expect(br.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionFalse)) + _, err = tb.GetBRTillNotFound(buildRunObject.Name, time.Second*1, time.Second*15) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + }) + + Context("When we lower a successfully completed buildrun's ttl retention parameters. Original param - 5m, Updated param - 5s", func() { + + BeforeEach(func() { + buildSample = []byte(test.MinimalBuildWithRetentionLimitOne) + buildRunSample = []byte(test.MinimalBuildRunRetentionTTLFive) + }) + + It("Should not find the buildrun a few seconds after it succeeds", func() { + + Expect(tb.CreateBuild(buildObject)).To(BeNil()) + + buildObject, err = tb.GetBuildTillValidation(buildObject.Name) + Expect(err).To(BeNil()) + + // Make the TTL 5 minutes + buildRunObject.Spec.Retention.TTLAfterSucceeded.Duration = time.Minute * 5 + Expect(tb.CreateBR(buildRunObject)).To(BeNil()) + br, err := tb.GetBRTillCompletion(buildRunObject.Name) + Expect(err).To(BeNil()) + Expect(br.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionTrue)) + + // Make the TTL 5 seconds + br.Spec.Retention.TTLAfterSucceeded.Duration = time.Second * 5 + Expect(tb.UpdateBR(br)).To(BeNil()) + _, err = tb.GetBRTillNotFound(buildRunObject.Name, time.Second*1, time.Second*15) Expect(apierrors.IsNotFound(err)).To(BeTrue()) }) }) @@ -86,7 +156,7 @@ var _ = Describe("Integration tests for retention limits and ttls for succeeded buildRunSample = []byte(test.MinimalBuildRunRetention) }) - It("The older successful buildrun should not exist", func() { + It("Should not find the older successful buildrun.", func() { Expect(tb.CreateBuild(buildObject)).To(BeNil()) @@ -106,29 +176,43 @@ var _ = Describe("Integration tests for retention limits and ttls for succeeded Expect(err).To(BeNil()) Expect(br2.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionTrue)) - _, err = tb.GetBR(BUILDRUN + tb.Namespace + "-1") + _, err = tb.GetBRTillNotFound(BUILDRUN+tb.Namespace+"-1", time.Second*1, time.Second*5) Expect(apierrors.IsNotFound(err)).To(BeTrue()) + _, err = tb.GetBRTillCompletion(BUILDRUN + tb.Namespace + "-2") + Expect(err).To(BeNil()) }) }) -
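These tests wait with `tb.GetBRTillNotFound(name, interval, timeout)` until the buildrun actually disappears instead of asserting immediately. The helper's real implementation lives in test/utils/buildruns.go and is not shown in this patch, so the following Go sketch is only a hypothetical illustration of the polling pattern, with a generic sentinel error standing in for the Kubernetes not-found error:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

var errNotFound = errors.New("not found")

// pollUntilNotFound re-checks a resource at a fixed interval until the lookup
// reports "not found" or the timeout expires; callers assert on the result.
func pollUntilNotFound(get func() error, interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		if err := get(); errors.Is(err, errNotFound) {
			return err // the expected outcome once cleanup has happened
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("resource still present after %v", timeout)
		}
		time.Sleep(interval)
	}
}

func main() {
	calls := 0
	err := pollUntilNotFound(func() error {
		calls++
		if calls >= 3 {
			return errNotFound // simulate the buildrun being deleted
		}
		return nil
	}, 10*time.Millisecond, time.Second)
	fmt.Println(err, calls) // not found 3
}
```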
Context("Multiple buildruns with different build limits for failure and success", func() { + Context("When a buildrun that has TTL defined in its spec and in the corresponding build's spec succeeds", func() { BeforeEach(func() { - cbsObjectFail, err = tb.Catalog.LoadCBSWithName(STRATEGY+tb.Namespace+"-fail", []byte(test.ClusterBuildStrategySingleStepNoPush)) + buildSample = []byte(test.MinimalBuildWithRetentionTTLOneMin) + buildRunSample = []byte(test.MinimalBuildRunRetentionTTLFive) + }) + + It("Should honor the TTL defined in the buildrun.", func() { + Expect(tb.CreateBuild(buildObject)).To(BeNil()) + + buildObject, err = tb.GetBuildTillValidation(buildObject.Name) Expect(err).To(BeNil()) + Expect(tb.CreateBR(buildRunObject)).To(BeNil()) - // Load build strategy that can fail - err = tb.CreateClusterBuildStrategy(cbsObjectFail) + br, err := tb.GetBRTillCompletion(buildRunObject.Name) Expect(err).To(BeNil()) - buildSample = []byte(test.MinimalBuildWithRetentionLimitDiff) - buildRunSample = []byte(test.MinimalBuildRunRetention) + Expect(br.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionTrue)) + _, err = tb.GetBRTillNotFound(buildRunObject.Name, time.Second*1, time.Second*15) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) }) + }) + + Context("Multiple buildruns with different build limits for failure and success", func() { - AfterEach(func() { - err := tb.DeleteClusterBuildStrategy(cbsObjectFail.Name) + BeforeEach(func() { Expect(err).To(BeNil()) + buildSample = []byte(test.MinimalBuildWithRetentionLimitDiff) + buildRunSample = []byte(test.MinimalBuildahBuildRunWithExitCode) }) // Case with failedLimit 1 and succeededLimit 2. // It ensures that only relevant buildruns are affected by retention parameters @@ -146,17 +230,11 @@ var _ = Describe("Integration tests for retention limits and ttls for succeeded buildRunObject.Name = BUILDRUN + tb.Namespace + "-success-2" err = tb.CreateBR(buildRunObject) - // Load and create the failing buildobject with the relevant cbs - buildObject, err = tb.Catalog.LoadBuildWithNameAndStrategy(BUILD+tb.Namespace+"-fail", STRATEGY+tb.Namespace+"-fail", buildSample) - Expect(err).To(BeNil()) - buildObject.Spec.Source.ContextDir = nil - Expect(tb.CreateBuild(buildObject)).To(BeNil()) - buildObject, err = tb.GetBuildTillValidation(buildObject.Name) - Expect(err).To(BeNil()) - // Create 1 failed buildrun buildRunObject.Name = BUILDRUN + tb.Namespace + "-fail-1" - buildRunObject.Spec.BuildRef.Name = BUILD + tb.Namespace + "-fail" + str := "false" + falseParam := v1alpha1.ParamValue{Name: "exit-command", SingleValue: &v1alpha1.SingleValue{Value: &str}} + buildRunObject.Spec.ParamValues = []v1alpha1.ParamValue{falseParam} err = tb.CreateBR(buildRunObject) // Wait for buildrun completion @@ -174,18 +252,19 @@ var _ = Describe("Integration tests for retention limits and ttls for succeeded // Create 1 failed buildrun. 
buildRunObject.Name = BUILDRUN + tb.Namespace + "-fail-2" - buildRunObject.Spec.BuildRef.Name = BUILD + tb.Namespace + "-fail" + buildRunObject.Spec.ParamValues = []v1alpha1.ParamValue{falseParam} err = tb.CreateBR(buildRunObject) + Expect(err).To(BeNil()) br4, err := tb.GetBRTillCompletion(buildRunObject.Name) Expect(err).To(BeNil()) Expect(br4.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionFalse)) // Check that the older failed buildrun has been deleted while the successful buildruns exist - _, err = tb.GetBR(BUILDRUN + tb.Namespace + "-fail-1") + _, err = tb.GetBRTillNotFound(BUILDRUN+tb.Namespace+"-fail-1", time.Second*1, time.Second*5) Expect(apierrors.IsNotFound(err)).To(BeTrue()) - _, err = tb.GetBR(BUILDRUN + tb.Namespace + "-success-1") + _, err = tb.GetBRTillCompletion(BUILDRUN + tb.Namespace + "-success-1") Expect(err).To(BeNil()) - _, err = tb.GetBR(BUILDRUN + tb.Namespace + "-success-2") + _, err = tb.GetBRTillCompletion(BUILDRUN + tb.Namespace + "-success-2") Expect(err).To(BeNil()) }) }) @@ -202,7 +281,7 @@ var _ = Describe("Integration tests for retention limits and ttls of buildRuns t ) // Load the ClusterBuildStrategies before each test case BeforeEach(func() { - cbsObject, err = tb.Catalog.LoadCBSWithName(STRATEGY+tb.Namespace, []byte(test.ClusterBuildStrategySingleStepNoPush)) + cbsObject, err = tb.Catalog.LoadCBSWithName(STRATEGY+tb.Namespace, []byte(test.ClusterBuildStrategyNoOp)) Expect(err).To(BeNil()) err = tb.CreateClusterBuildStrategy(cbsObject) @@ -228,19 +307,21 @@ var _ = Describe("Integration tests for retention limits and ttls of buildRuns t if buildRunSample != nil { buildRunObject, err = tb.Catalog.LoadBRWithNameAndRef(BUILDRUN+tb.Namespace, BUILD+tb.Namespace, buildRunSample) Expect(err).To(BeNil()) + str := "false" + falseParam := v1alpha1.ParamValue{Name: "exit-command", SingleValue: &v1alpha1.SingleValue{Value: &str}} + buildRunObject.Spec.ParamValues = []v1alpha1.ParamValue{falseParam} } }) - Context("When a buildrun related to a build with short ttl set succeeds", func() { + Context("When a buildrun related to a build with short ttl set fails", func() { BeforeEach(func() { buildSample = []byte(test.MinimalBuildWithRetentionTTLFive) buildRunSample = []byte(test.MinimalBuildRunRetention) }) - It("Should not find the buildrun after few seconds after it fails", func() { + It("Should not find the buildrun a few seconds after it fails", func() { - buildObject.Spec.Source.ContextDir = nil Expect(tb.CreateBuild(buildObject)).To(BeNil()) buildObject, err = tb.GetBuildTillValidation(buildObject.Name) @@ -251,11 +332,86 @@ var _ = Describe("Integration tests for retention limits and ttls of buildRuns t br, err := tb.GetBRTillCompletion(buildRunObject.Name) Expect(err).To(BeNil()) Expect(br.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionFalse)) - br, err = tb.GetBRTillNotFound(buildRunObject.Name, time.Second*1, time.Minute) + _, err = tb.GetBRTillNotFound(buildRunObject.Name, time.Second*1, time.Second*15) Expect(apierrors.IsNotFound(err)).To(BeTrue()) }) }) + Context("When we lower a failed completed buildrun's ttl retention parameters.
Original param - 5m, Updated param - 5s", func() { + + BeforeEach(func() { + buildSample = []byte(test.MinimalBuildWithRetentionLimitOne) + buildRunSample = []byte(test.MinimalBuildRunRetentionTTLFive) + }) + + It("Should not find the buildrun a few seconds after it fails", func() { + + Expect(tb.CreateBuild(buildObject)).To(BeNil()) + + buildObject, err = tb.GetBuildTillValidation(buildObject.Name) + Expect(err).To(BeNil()) + + // Make the TTL 5 minutes + buildRunObject.Spec.Retention.TTLAfterFailed.Duration = time.Minute * 5 + Expect(tb.CreateBR(buildRunObject)).To(BeNil()) + br, err := tb.GetBRTillCompletion(buildRunObject.Name) + Expect(err).To(BeNil()) + Expect(br.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionFalse)) + + // Make the TTL 5 seconds + br.Spec.Retention.TTLAfterFailed.Duration = time.Second * 5 + Expect(tb.UpdateBR(br)).To(BeNil()) + _, err = tb.GetBRTillNotFound(buildRunObject.Name, time.Second*1, time.Second*15) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + }) + + Context("When a buildrun with short ttl set fails. TTL is defined in the buildrun spec", func() { + + BeforeEach(func() { + buildSample = []byte(test.MinimalBuildWithRetentionLimitOne) + buildRunSample = []byte(test.MinimalBuildRunRetentionTTLFive) + }) + + It("Should not find the buildrun a few seconds after it fails", func() { + + Expect(tb.CreateBuild(buildObject)).To(BeNil()) + + buildObject, err = tb.GetBuildTillValidation(buildObject.Name) + Expect(err).To(BeNil()) + Expect(tb.CreateBR(buildRunObject)).To(BeNil()) + + br, err := tb.GetBRTillCompletion(buildRunObject.Name) + Expect(err).To(BeNil()) + Expect(br.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionFalse)) + _, err = tb.GetBRTillNotFound(buildRunObject.Name, time.Second*1, time.Second*15) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + }) + + Context("When a buildrun that has TTL defined in its spec and in the corresponding build's spec fails", func() { + + BeforeEach(func() { + buildSample = []byte(test.MinimalBuildWithRetentionTTLOneMin) + buildRunSample = []byte(test.MinimalBuildRunRetentionTTLFive) + }) + + It("Should honor the TTL defined in the buildrun.", func() { + Expect(tb.CreateBuild(buildObject)).To(BeNil()) + + buildObject, err = tb.GetBuildTillValidation(buildObject.Name) + Expect(err).To(BeNil()) + Expect(tb.CreateBR(buildRunObject)).To(BeNil()) + + br, err := tb.GetBRTillCompletion(buildRunObject.Name) + Expect(err).To(BeNil()) + Expect(br.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionFalse)) + _, err = tb.GetBRTillNotFound(buildRunObject.Name, time.Second*1, time.Second*15) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + + }) + }) + Context("Multiple failed buildruns related to build with limit 1", func() { BeforeEach(func() { @@ -263,8 +419,7 @@ var _ = Describe("Integration tests for retention limits and ttls of buildRuns t buildRunSample = []byte(test.MinimalBuildRunRetention) }) - It("The older failed buildrun should not exist", func() { - buildObject.Spec.Source.ContextDir = nil + It("Should not find the older failed buildrun", func() { Expect(tb.CreateBuild(buildObject)).To(BeNil()) buildObject, err = tb.GetBuildTillValidation(buildObject.Name) @@ -283,10 +438,11 @@ var _ = Describe("Integration tests for retention limits and ttls of buildRuns t Expect(err).To(BeNil()) Expect(br2.Status.GetCondition(v1alpha1.Succeeded).Status).To(Equal(corev1.ConditionFalse)) - _, err = tb.GetBR(BUILDRUN + tb.Namespace + "-1") + _, err =
tb.GetBRTillNotFound(BUILDRUN+tb.Namespace+"-1", time.Second*1, time.Second*5) Expect(apierrors.IsNotFound(err)).To(BeTrue()) + _, err = tb.GetBRTillCompletion(BUILDRUN + tb.Namespace + "-2") + Expect(err).To(BeNil()) }) }) - })
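A closing usage note on the parameterized noop strategy these tests rely on: a BuildRun is forced to fail by overriding the strategy's `exit-command` parameter with "false", so the single build step exits non-zero. The sketch below constructs that override with the repository's own `v1alpha1` types, following the exact `ParamValue`/`SingleValue` shape used by the test code in this patch:

```go
package main

import (
	"fmt"

	v1alpha1 "github.com/shipwright-io/build/pkg/apis/build/v1alpha1"
)

// failParam returns the paramValue override the tests attach to a BuildRun:
// the noop strategy's step runs `false` instead of `true` and therefore fails.
func failParam() v1alpha1.ParamValue {
	value := "false"
	return v1alpha1.ParamValue{
		Name:        "exit-command",
		SingleValue: &v1alpha1.SingleValue{Value: &value},
	}
}

func main() {
	p := failParam()
	fmt.Println(p.Name, "=", *p.SingleValue.Value)
}
```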