From f03684577e9abb4129913b9b9da7e0ddfb36a6fd Mon Sep 17 00:00:00 2001 From: John Kyros <79665180+jkyros@users.noreply.github.com> Date: Tue, 19 Apr 2022 18:28:36 -0500 Subject: [PATCH 1/9] Add structures defining pool-owned build resources We need to specify what resources the pool should be creating/owning in order to manage builds and the relationships between them (triggers, which buildconfigs should source from which streams, etc). The pool needs some template/plan to follow when it gets triggered to create and verify that its resources exist. This is my not-quite-as-elegant-as-I-had-hoped attempt at that. --- pkg/controller/build/pool_build_resources.go | 91 ++++++++++++++++++++ 1 file changed, 91 insertions(+) create mode 100644 pkg/controller/build/pool_build_resources.go diff --git a/pkg/controller/build/pool_build_resources.go b/pkg/controller/build/pool_build_resources.go new file mode 100644 index 0000000000..1d1fdd95bc --- /dev/null +++ b/pkg/controller/build/pool_build_resources.go @@ -0,0 +1,91 @@ +package build + +// This was arose from the need to keep track of resources the pool should ensure the presence of. +// It is not as elegant as it should be, but the problem I am trying to solve is capturing: +// 1.) what resources the pool should ensure the presence of and +// 2.) the relationships between them + +import ( + "bytes" + "text/template" + + mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" +) + +type PoolResourceNames struct { + ImageStream PoolImageStreamList + BuildConfig PoolBuildConfigList +} + +type PoolImageStreamList struct { + Base string + RenderedConfig string + Content string + CustomContent string + External string + PerNode string +} + +type PoolBuildConfigList struct { + Content PoolBuildConfig + CustomContent PoolBuildConfig +} + +type PoolBuildConfig struct { + Name string + Source string + Target string + TriggeredByStreams []string + DockerfileContent string +} + +func PoolBuildResources(pool *mcfgv1.MachineConfigPool) *PoolResourceNames { + + pisl := PoolImageStreamList{ + Base: pool.Name + ctrlcommon.ImageStreamSuffixCoreOS, + RenderedConfig: pool.Name + ctrlcommon.ImageStreamSuffixRenderedConfig, + Content: pool.Name + ctrlcommon.ImageStreamSuffixMCOContent, + CustomContent: pool.Name + ctrlcommon.ImageStreamSuffixMCOContentCustom, + External: pool.Name + ctrlcommon.ImageStreamSuffixMCOContentExternal, + PerNode: pool.Name + ctrlcommon.ImageStreamSuffixMCOContentPerNode, + } + + // Templated dockerfile for the complicated mco-content buildconfig that applies the rendered-config + t, _ := template.New(machineConfigContentDockerfile).Parse(machineConfigContentDockerfile) + var tpl bytes.Buffer + t.Execute(&tpl, pisl) + + bcl := PoolBuildConfigList{ + Content: PoolBuildConfig{ + Name: pool.Name + "-build" + ctrlcommon.ImageStreamSuffixMCOContent, + Source: pisl.Base, + Target: pisl.Content, + TriggeredByStreams: []string{pisl.RenderedConfig + ":latest"}, + DockerfileContent: tpl.String(), + }, + CustomContent: PoolBuildConfig{ + Name: pool.Name + "-build" + ctrlcommon.ImageStreamSuffixMCOContentCustom, + Source: pisl.Content, + Target: pisl.CustomContent, + TriggeredByStreams: []string{}, + DockerfileContent: dummyDockerfile, + }, + } + + return &PoolResourceNames{pisl, bcl} +} + +// IsManagedImageStream tells us if a given image stream name is one of the names we think we should be managing. 
This is used to tell if +// someone has assigned some completely unmanaged imagestream to our layered pool. +func (prn *PoolResourceNames) IsManagedImageStream(imageStreamName string) bool { + // TODO(jkyros): The longer this goes on, the more I feel this should be a map + if imageStreamName == prn.ImageStream.Base || + imageStreamName == prn.ImageStream.Content || + imageStreamName == prn.ImageStream.CustomContent || + imageStreamName == prn.ImageStream.External || + imageStreamName == prn.ImageStream.PerNode { + return true + } + return false +} From 3a4f2d4f33c7f2c742745d0fe718311b22413f20 Mon Sep 17 00:00:00 2001 From: John Kyros <79665180+jkyros@users.noreply.github.com> Date: Tue, 19 Apr 2022 18:44:52 -0500 Subject: [PATCH 2/9] Add build controller for layered/image builds The build controller "takes over" for pools that are marked as layered. It will more or less serve the function of the render controller but for image based pools. The render controller currently retains the functionality of generating the rendered config, and will push that rendered config into an imagestream every time it gets updated. --- pkg/controller/build/build_controller.go | 1334 ++++++++++++++++++ pkg/controller/build/pool_build_resources.go | 19 +- 2 files changed, 1348 insertions(+), 5 deletions(-) create mode 100644 pkg/controller/build/build_controller.go diff --git a/pkg/controller/build/build_controller.go b/pkg/controller/build/build_controller.go new file mode 100644 index 0000000000..1059950558 --- /dev/null +++ b/pkg/controller/build/build_controller.go @@ -0,0 +1,1334 @@ +package build + +import ( + "context" + "encoding/json" + "fmt" + "sort" + "time" + + "github.com/golang/glog" + mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" + "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/scheme" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + coreclientsetv1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + + imageinformersv1 "github.com/openshift/client-go/image/informers/externalversions/image/v1" + imagelistersv1 "github.com/openshift/client-go/image/listers/image/v1" + + buildinformersv1 "github.com/openshift/client-go/build/informers/externalversions/build/v1" + buildlistersv1 "github.com/openshift/client-go/build/listers/build/v1" + + buildclientset "github.com/openshift/client-go/build/clientset/versioned" + imageclientset "github.com/openshift/client-go/image/clientset/versioned" + + mcfgclientset "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned" + mcfginformersv1 "github.com/openshift/machine-config-operator/pkg/generated/informers/externalversions/machineconfiguration.openshift.io/v1" + mcfglistersv1 "github.com/openshift/machine-config-operator/pkg/generated/listers/machineconfiguration.openshift.io/v1" + + buildv1 "github.com/openshift/api/build/v1" + imagev1 "github.com/openshift/api/image/v1" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + ign3types "github.com/coreos/ignition/v2/config/v3_2/types" + 
"github.com/vincent-petithory/dataurl" +) + +const ( + + // maxRetries is the number of times a machineconfig pool will be retried before it is dropped out of the queue. + // With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the times + // a machineconfig pool is going to be requeued: + // + // 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s + maxRetries = 15 + + // updateDelay is a pause to deal with churn in MachineConfigs; see + // https://github.com/openshift/machine-config-operator/issues/301 + updateDelay = 5 * time.Second + + machineConfigContentDockerfile = ` + # Multistage build, we need to grab the files from our config imagestream + FROM image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/{{.RenderedConfig }} AS machineconfig + + # We're actually basing on the "new format" image from the coreos base image stream + FROM image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/coreos + + # Pull in the files from our machineconfig stage + COPY --from=machineconfig /machine-config-ignition.json /etc/machine-config-ignition.json + + # Make the config drift checker happy + COPY --from=machineconfig /machine-config.json /etc/machine-config-daemon/currentconfig + + # Apply the config to the image + ENV container=1 + RUN exec -a ignition-apply /usr/lib/dracut/modules.d/30ignition/ignition --ignore-unsupported /etc/machine-config-ignition.json + + # Rebuild origin.d (I included an /etc/yum.repos.d/ file in my machineconfig so it could find the RPMS, that's why this works) + RUN rpm-ostree ex rebuild && rm -rf /var/cache /etc/rpm-ostree/origin.d + + # clean up. We want to be particularly strict so that live apply works + RUN rm /etc/machine-config-ignition.json + # TODO remove these hacks once we have + # https://github.com/coreos/rpm-ostree/pull/3544 + # and + # https://github.com/coreos/ignition/issues/1339 is fixed + # don't fail if wildcard has no matches + RUN bash -c "rm /usr/share/rpm/__db.*"; true + # to keep live apply working + RUN bash -c "if [[ -e /etc/systemd/system-preset/20-ignition.preset ]]; then sort /etc/systemd/system-preset/20-ignition.preset -o /etc/systemd/system-preset/20-ignition.preset; fi" + + # This is so we can get the machineconfig injected + ARG machineconfig=unknown + # Apply the injected machineconfig name as a label so node_controller can check it + LABEL machineconfig=$machineconfig + ` + dummyDockerfile = `FROM dummy` +) + +var ( + // controllerKind contains the schema.GroupVersionKind for this controller type. + controllerKind = mcfgv1.SchemeGroupVersion.WithKind("MachineConfigPool") +) + +// Controller defines the build controller. 
+type Controller struct {
+	client      mcfgclientset.Interface
+	imageclient imageclientset.Interface
+	buildclient buildclientset.Interface
+	kubeclient  clientset.Interface
+
+	eventRecorder record.EventRecorder
+
+	syncHandler              func(mcp string) error
+	enqueueMachineConfigPool func(*mcfgv1.MachineConfigPool)
+
+	ccLister  mcfglistersv1.ControllerConfigLister
+	mcpLister mcfglistersv1.MachineConfigPoolLister
+	bLister   buildlistersv1.BuildLister
+	bcLister  buildlistersv1.BuildConfigLister
+	isLister  imagelistersv1.ImageStreamLister
+
+	ccListerSynced  cache.InformerSynced
+	mcpListerSynced cache.InformerSynced
+	bListerSynced   cache.InformerSynced
+	bcListerSynced  cache.InformerSynced
+	isListerSynced  cache.InformerSynced
+
+	queue workqueue.RateLimitingInterface
+}
+
+// New returns a new build controller.
+func New(
+	ccInformer mcfginformersv1.ControllerConfigInformer,
+	mcpInformer mcfginformersv1.MachineConfigPoolInformer,
+	isInformer imageinformersv1.ImageStreamInformer,
+	bcInformer buildinformersv1.BuildConfigInformer,
+	bInformer buildinformersv1.BuildInformer,
+	mcfgClient mcfgclientset.Interface,
+	kubeClient clientset.Interface,
+	imageClient imageclientset.Interface,
+	buildClient buildclientset.Interface,
+) *Controller {
+	eventBroadcaster := record.NewBroadcaster()
+	eventBroadcaster.StartLogging(glog.Infof)
+	eventBroadcaster.StartRecordingToSink(&coreclientsetv1.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
+
+	ctrl := &Controller{
+		client:        mcfgClient,
+		imageclient:   imageClient,
+		kubeclient:    kubeClient,
+		buildclient:   buildClient,
+		eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "machineconfigcontroller-buildcontroller"}),
+		queue:         workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "machineconfigcontroller-buildcontroller"),
+	}
+
+	mcpInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+		AddFunc:    ctrl.addMachineConfigPool,
+		UpdateFunc: ctrl.updateMachineConfigPool,
+		DeleteFunc: ctrl.deleteMachineConfigPool,
+	})
+	bInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+		AddFunc:    ctrl.addBuild,
+		UpdateFunc: ctrl.updateBuild,
+		DeleteFunc: ctrl.deleteBuild,
+	})
+	bcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+		AddFunc:    ctrl.addBuildConfig,
+		UpdateFunc: ctrl.updateBuildConfig,
+		DeleteFunc: ctrl.deleteBuildConfig,
+	})
+	isInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+		AddFunc:    ctrl.addImageStream,
+		UpdateFunc: ctrl.updateImageStream,
+		DeleteFunc: ctrl.deleteImageStream,
+	})
+
+	ctrl.syncHandler = ctrl.syncMachineConfigPool
+	ctrl.enqueueMachineConfigPool = ctrl.enqueueDefault
+
+	ctrl.ccLister = ccInformer.Lister()
+	ctrl.mcpLister = mcpInformer.Lister()
+	ctrl.isLister = isInformer.Lister()
+	ctrl.bcLister = bcInformer.Lister()
+	ctrl.bLister = bInformer.Lister()
+
+	ctrl.ccListerSynced = ccInformer.Informer().HasSynced
+	ctrl.mcpListerSynced = mcpInformer.Informer().HasSynced
+	ctrl.isListerSynced = isInformer.Informer().HasSynced
+	ctrl.bcListerSynced = bcInformer.Informer().HasSynced
+	ctrl.bListerSynced = bInformer.Informer().HasSynced
+
+	return ctrl
+}
+
+// Run executes the build controller.
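+// A typical wiring from the operator side looks roughly like this (the worker
+// count and informer plumbing here are illustrative):
+//
+//	ctrl := build.New(ccInformer, mcpInformer, isInformer, bcInformer, bInformer,
+//		mcfgClient, kubeClient, imageClient, buildClient)
+//	go ctrl.Run(2, stopCh)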
+func (ctrl *Controller) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer ctrl.queue.ShutDown() + + if !cache.WaitForCacheSync(stopCh, ctrl.mcpListerSynced, ctrl.ccListerSynced, ctrl.bListerSynced, ctrl.bcListerSynced, ctrl.isListerSynced) { + return + } + + glog.Info("Starting MachineConfigController-BuildController") + defer glog.Info("Shutting down MachineConfigController-BuildController") + + for i := 0; i < workers; i++ { + go wait.Until(ctrl.worker, time.Second, stopCh) + } + + <-stopCh +} + +func (ctrl *Controller) enqueue(pool *mcfgv1.MachineConfigPool) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pool) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", pool, err)) + return + } + + ctrl.queue.Add(key) +} + +func (ctrl *Controller) enqueueRateLimited(pool *mcfgv1.MachineConfigPool) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pool) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", pool, err)) + return + } + + ctrl.queue.AddRateLimited(key) +} + +// enqueueAfter will enqueue a pool after the provided amount of time. +func (ctrl *Controller) enqueueAfter(pool *mcfgv1.MachineConfigPool, after time.Duration) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pool) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", pool, err)) + return + } + + ctrl.queue.AddAfter(key, after) +} + +// enqueueDefault calls a default enqueue function +func (ctrl *Controller) enqueueDefault(pool *mcfgv1.MachineConfigPool) { + ctrl.enqueueAfter(pool, updateDelay) +} + +// worker runs a worker thread that just dequeues items, processes them, and marks them done. +// It enforces that the syncHandler is never invoked concurrently with the same key. +func (ctrl *Controller) worker() { + for ctrl.processNextWorkItem() { + } +} + +func (ctrl *Controller) processNextWorkItem() bool { + key, quit := ctrl.queue.Get() + if quit { + return false + } + defer ctrl.queue.Done(key) + + err := ctrl.syncHandler(key.(string)) + ctrl.handleErr(err, key) + + return true +} + +func (ctrl *Controller) handleErr(err error, key interface{}) { + if err == nil { + ctrl.queue.Forget(key) + return + } + + if ctrl.queue.NumRequeues(key) < maxRetries { + glog.V(2).Infof("Error syncing machineconfigpool %v: %v", key, err) + ctrl.queue.AddRateLimited(key) + return + } + + utilruntime.HandleError(err) + glog.V(2).Infof("Dropping machineconfigpool %q out of the queue: %v", key, err) + ctrl.queue.Forget(key) + ctrl.queue.AddAfter(key, 1*time.Minute) +} + +// TODO(jkyros): the question we're trying to answer is "is there any content that has changed that is not reflected in the current image for the pool" + +// syncMachineConfigPool will sync the machineconfig pool with the given key. +// This function is not meant to be invoked concurrently with the same key. 
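+// Roughly, a sync: ensures the shared CoreOS imagestream and the per-pool
+// imagestreams exist, picks which imagestream the pool deploys from
+// (external, then custom, then the default MCO content stream), ensures the
+// buildconfigs exist and inspects their newest builds, and finally annotates
+// the pool with the newest usable image and updates its status.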
+func (ctrl *Controller) syncMachineConfigPool(key string) error { + startTime := time.Now() + glog.V(4).Infof("Started syncing machineconfigpool %q (%v)", key, startTime) + defer func() { + glog.V(4).Infof("Finished syncing machineconfigpool %q (%v)", key, time.Since(startTime)) + }() + + _, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return err + } + machineconfigpool, err := ctrl.mcpLister.Get(name) + if errors.IsNotFound(err) { + glog.V(2).Infof("MachineConfigPool %v has been deleted", key) + return nil + } + if err != nil { + return err + } + + // Make sure the shared base CoreOS imagestream exists + // TODO(jkyros): There seems to be a delay (probably the time it takes to pull the image) before the image tag shows up. As a result, + // when we create our base imagestream later, it's empty until this gets populated and triggers it. + _, err = ctrl.ensureCoreOSImageStream() + if err != nil { + return err + } + + pool := machineconfigpool.DeepCopy() + + // TODO(jkyros): take this out when we decide actual UX, this just forces the layered label on to + // the pool if its name is the string "layered" + if pool.Name == "layered" { + if pool.Labels == nil { + pool.Labels = map[string]string{} + } + // TODO(jkyros): we'll see if we like this, but we need a way to specify which imagestream it should use + pool.Labels[ctrlcommon.ExperimentalLayeringPoolLabel] = "" + + // TODO(jkyros): Don't update this here. We're just doing this now to "steal" the pool from render_controller + _, err = ctrl.client.MachineconfigurationV1().MachineConfigPools().Update(context.TODO(), pool, metav1.UpdateOptions{}) + if err != nil { + return err + } + } + + // If this pool isn't managed by us, the render controller will handle it + if !ctrlcommon.IsLayeredPool(pool) { + return nil + } + + // TODO(jkyros): I *could* have the build controller do the config rendering here for the pools + // that the build controller manages, but there is no escaping at least some modification to the render + // controller telling it to ignore the pools the build controller is managing. 
+ + // Stuff an entitlements machineconfig into the pool + + ctrl.experimentalAddEntitlements(pool) + + glog.V(2).Infof("Ensuring image streams exist for pool %s", pool.Name) + + // Get the mapping/list of resources this pool should ensure and own + pbr := PoolBuildResources(pool) + + // Our list of imagestreams we need to ensure exists + var ensureImageStreams = []string{ + pbr.ImageStream.Base, + pbr.ImageStream.ExternalBase, + pbr.ImageStream.RenderedConfig, + pbr.ImageStream.Content, + pbr.ImageStream.CustomContent, + pbr.ImageStream.External, + } + + // Make sure the imagestreams exist so we can populate them with our image builds + for _, imageStreamName := range ensureImageStreams { + _, err := ctrl.ensureImageStreamForPool(pool, imageStreamName, pbr) + if err != nil { + // I don't know if it existed or not, I couldn't get it + return fmt.Errorf("Failed to ensure ImageStream %s: %w", imageStreamName, err) + } + + } + + // Magically switch imagestreams if custom/external end up with images in them + err = ctrl.ensureImageStreamPrecedenceIfPopulated(pool) + if err != nil { + return fmt.Errorf("Could not ensure proper imagestream was selected for pool %s: %w", pool.Name, err) + } + + // TODO(jkyros): we could have just now set our imagestream based on changes, but we might not have a build yet + + // Figure out which imagestream the pool is deploying from + poolImageStreamName, err := ctrlcommon.GetPoolImageStream(pool) + if err != nil { + return err + } + + // Get the actual image stream object for that imagestream + poolImageStream, err := ctrl.isLister.ImageStreams(ctrlcommon.MCONamespace).Get(poolImageStreamName) + if err != nil { + return err + } + + // Get the most recent image from that stream if it exists + // TODO(jkyros): this can be nil + mostRecentPoolImage := ctrl.getMostRecentImageTagForImageStream(poolImageStream, "latest") + + // Our list of imagestreams we need to ensure exists + var ensureBuildConfigs = []PoolBuildConfig{ + pbr.BuildConfig.Content, + pbr.BuildConfig.CustomContent, + } + + for num, pbc := range ensureBuildConfigs { + + checkBuildConfig, err := ctrl.ensureBuildConfigForPool(pool, &ensureBuildConfigs[num]) + if err != nil { + // I don't know if it existed or not, I couldn't get it + return fmt.Errorf("Failed to ensure BuildConfig %s: %w", pbc.Name, err) + } + + // We're looking for builds that belong to this buildconfig, so craft a filter + ourBuildReq, err := labels.NewRequirement("buildconfig", selection.In, []string{checkBuildConfig.Name}) + if err != nil { + return err + } + // Make a selector based on our requirement + ourBuildSelector := labels.NewSelector().Add(*ourBuildReq) + + // Retrieve those builds that belong to this buildconfig + builds, err := ctrl.bLister.Builds(ctrlcommon.MCONamespace).List(ourBuildSelector) + if err != nil { + return err + } + + // If builds exist for this buildconfig + if len(builds) > 0 { + // Sort the builds in descending order, we want the newest first + sort.Slice(builds, func(i, j int) bool { + return builds[i].CreationTimestamp.After(builds[j].CreationTimestamp.Time) + }) + + // This is the newest, and we know it can't be outof bounds because of how we got here + // TODO(jkyros): If a newer build has been queued, should we terminate the old one? + mostRecentBuild := builds[0] + + // TODO(jkyros): We need to find a "level triggered" way to figure out if the image we have is representative + // of the state of our "build ladder" so we know if a failed build is a problem or not. Ultimately a metadata problem. 
+ if mostRecentPoolImage == nil || mostRecentPoolImage.Created.Before(&mostRecentBuild.CreationTimestamp) { + // If they failed/are in bad phase, we're probably in trouble + switch mostRecentBuild.Status.Phase { + case buildv1.BuildPhaseError: + glog.Errorf("Need to degrade, build %s is %s", mostRecentBuild.Name, mostRecentBuild.Status.Phase) + case buildv1.BuildPhaseFailed: + glog.Errorf("Need to degrade, build %s is %s", mostRecentBuild.Name, mostRecentBuild.Status.Phase) + case buildv1.BuildPhaseCancelled: + glog.Errorf("Need to degrade, build %s is %s", mostRecentBuild.Name, mostRecentBuild.Status.Phase) + case buildv1.BuildPhaseComplete: + glog.Errorf("A build %s has completed for pool %s", mostRecentBuild.Name, mostRecentBuild.Status.Phase) + default: + // If they worked okay, we're building, we can update our status? + glog.Infof("A build %s is in progress (%s) for pool %s", mostRecentBuild.Name, mostRecentBuild.Status.Phase, pool.Name) + } + + } + } + + } + + // Do we have an image stream for this pool? We should if we got here. + is, err := ctrl.isLister.ImageStreams(ctrlcommon.MCONamespace).Get(poolImageStream.Name) + if apierrors.IsNotFound(err) { + // TODO(jkyros): As cgwalters points out, this should probably degrade because it should exist + glog.Warningf("ImageStream for %s does not exist (yet?): %s", pool.Name, err) + } else { + // If there is an image ready, annotate the pool with it so node controller can use it if it's the right one + err := ctrl.annotatePoolWithNewestImage(is, pool) + if err != nil { + return err + } + } + + // TODO(jkyros): Only update if we changed, don't always update. Also, if we update here and then update status again, that seems + // wasteful. + _, err = ctrl.client.MachineconfigurationV1().MachineConfigPools().Update(context.TODO(), pool, metav1.UpdateOptions{}) + if err != nil { + return err + } + + return ctrl.syncAvailableStatus(pool) + +} + +// Machine Config Pools + +func (ctrl *Controller) addMachineConfigPool(obj interface{}) { + pool := obj.(*mcfgv1.MachineConfigPool) + glog.V(4).Infof("Adding MachineConfigPool %s", pool.Name) + ctrl.enqueueMachineConfigPool(pool) + +} + +func (ctrl *Controller) updateMachineConfigPool(old, cur interface{}) { + oldPool := old.(*mcfgv1.MachineConfigPool) + curPool := cur.(*mcfgv1.MachineConfigPool) + + glog.V(4).Infof("Updating MachineConfigPool %s", oldPool.Name) + ctrl.enqueueMachineConfigPool(curPool) +} + +func (ctrl *Controller) deleteMachineConfigPool(obj interface{}) { + pool, ok := obj.(*mcfgv1.MachineConfigPool) + if !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj)) + return + } + pool, ok = tombstone.Obj.(*mcfgv1.MachineConfigPool) + if !ok { + utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a MachineConfigPool %#v", obj)) + return + } + } + glog.V(4).Infof("Deleting MachineConfigPool %s", pool.Name) +} + +// ImagStreams + +func (ctrl *Controller) addImageStream(obj interface{}) { + +} + +func (ctrl *Controller) updateImageStream(old, cur interface{}) { + imagestream := cur.(*imagev1.ImageStream) + controllerRef := metav1.GetControllerOf(imagestream) + + if controllerRef != nil { + + if pool := ctrl.resolveControllerRef(controllerRef); pool != nil { + + glog.Infof("ImageStream %s changed for pool %s", imagestream.Name, pool.Name) + + // TODO(jkyros): This is a race I usually win, but I won't always, and we need a better + // way to get this metadata in + if 
imagestream.Name == pool.Name+ctrlcommon.ImageStreamSuffixRenderedConfig { + ctrl.cheatMachineConfigLabelIntoBuildConfig(imagestream, pool) + } + + ctrl.enqueueMachineConfigPool(pool) + + } + + } +} + +func (ctrl *Controller) deleteImageStream(obj interface{}) { + // TODO(jkyros): probably worth enqueueing the pool again here just so + // our sync can figure out that this newly-deleted stream is now empty and update the mappings ? +} + +// Builds + +func (ctrl *Controller) addBuild(obj interface{}) { + build := obj.(*buildv1.Build) + + glog.Infof("Added a build: %s", build.Name) + + // TODO(jkyros): Is this one of our builds that belongs to our imagestream? + // If it is, we should mark that somewhere so we know the pool is "building" + +} + +func (ctrl *Controller) updateBuild(old, cur interface{}) { + build := old.(*buildv1.Build) + + glog.Infof("Updated a build: %s", build.Name) + // Builds will move through phases which cause them to change + // Most of those phases are standard/good, but some of them are bad + // We want to know if we end up in a bad phase and need to retry + ctrl.enqueuePoolIfBuildProblems(build) + +} + +func (ctrl *Controller) deleteBuild(obj interface{}) { + build := obj.(*buildv1.Build) + + glog.Infof("Deleted a build: %s", build.Name) + +} + +// Buildconfigs + +func (ctrl *Controller) addBuildConfig(obj interface{}) { + buildconfig := obj.(*buildv1.BuildConfig) + + glog.Infof("Added a buildconfig: %s", buildconfig.Name) + +} + +func (ctrl *Controller) updateBuildConfig(old, cur interface{}) { + buildconfig := old.(*buildv1.BuildConfig) + newbuildconfig := cur.(*buildv1.BuildConfig) + + glog.Infof("Updated a buildconfig: %s", buildconfig.Name) + + // Every time a buildconfig is instantiated it bumps the generation, so it always looks like it's changing + // For now we really only care if the user edited the dockerfile, and that string is a pointer + if buildconfig.Spec.Source.Dockerfile != nil && newbuildconfig.Spec.Source.Dockerfile != nil { + if *buildconfig.Spec.Source.Dockerfile != *newbuildconfig.Spec.Source.Dockerfile { + + glog.Infof("The dockerfile for buildconfig %s changed, triggering a build", buildconfig.Name) + // TODO(jkyros); If this is the mco content, we need the machineconfig name + // so go get the image from that imagestream and put the name in. Otherwise just start it. + + br := &buildv1.BuildRequest{ + //TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{Name: buildconfig.Name}, + //Env: []corev1.EnvVar{whichConfig}, + TriggeredBy: []buildv1.BuildTriggerCause{ + {Message: "The machine config controller"}, + }, + DockerStrategyOptions: &buildv1.DockerStrategyOptions{ + //BuildArgs: []corev1.EnvVar{whichConfig}, + //NoCache: new(bool), + }, + } + + _, err := ctrl.buildclient.BuildV1().BuildConfigs(ctrlcommon.MCONamespace).Instantiate(context.TODO(), br.Name, br, metav1.CreateOptions{}) + if err != nil { + glog.Errorf("Failed to trigger image build: %s", err) + } + } + } + +} + +func (ctrl *Controller) deleteBuildConfig(obj interface{}) { + buildconfig := obj.(*buildv1.BuildConfig) + + glog.Infof("Deleted a buildconfig: %s", buildconfig.Name) + +} + +// experimentalAddEntitlements grabs the cluster entitlement certificates out of the openshift-config-managed namespace and +// stuffs them into a machineconfig for our layered pool, so we can have entitled builds. This is a terrible practice, and +// we should probably just sync the secrets into our namespace so our builds can use them directly rather than expose them via machineconfig. 
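+// Concretely, it lays down /etc/yum.repos.d/redhat.repo plus the entitlement key and
+// certificate (/etc/pki/entitlement/entitlement-key.pem and entitlement.pem) taken from
+// the etc-pki-entitlement secret in openshift-config-managed, wrapped in a
+// 99-<pool>-entitled-build machineconfig that is only created if it does not already exist.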
+func (ctrl *Controller) experimentalAddEntitlements(pool *mcfgv1.MachineConfigPool) { + + var entitledConfigName = fmt.Sprintf("99-%s-entitled-build", pool.Name) + + // If it's not there, put it there, otherwise do nothing + _, err := ctrl.client.MachineconfigurationV1().MachineConfigs().Get(context.TODO(), entitledConfigName, metav1.GetOptions{}) + if errors.IsNotFound(err) { + + // Repo configuration for redhat package entitlements ( I just added base and appstream) + // TODO(jkyros): do this right once subscription-manager is included in RHCOS + redhatRepo := `[rhel-8-for-x86_64-baseos-rpms] +name = Red Hat Enterprise Linux 8 for x86_64 - BaseOS (RPMs) +baseurl = https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os +enabled = 1 +gpgcheck = 0 +sslverify = 0 +sslclientkey = /etc/pki/entitlement/entitlement-key.pem +sslclientcert = /etc/pki/entitlement/entitlement.pem +metadata_expire = 86400 +enabled_metadata = 1 + +[rhel-8-for-x86_64-appstream-rpms] +name = Red Hat Enterprise Linux 8 for x86_64 - AppStream (RPMs) +baseurl = https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/os +enabled = 1 +gpgcheck = 0 +sslverify = 0 +sslclientkey = /etc/pki/entitlement/entitlement-key.pem +sslclientcert = /etc/pki/entitlement/entitlement.pem +metadata_expire = 86400 +enabled_metadata = 1 +` + + // Make an ignition to stuff into our machineconfig + ignConfig := ctrlcommon.NewIgnConfig() + ignConfig.Storage.Files = append(ignConfig.Storage.Files, NewIgnFile("/etc/yum.repos.d/redhat.repo", redhatRepo)) + + // Get our entitlement secrets out of the managed namespace + entitlements, err := ctrl.kubeclient.CoreV1().Secrets("openshift-config-managed").Get(context.TODO(), "etc-pki-entitlement", metav1.GetOptions{}) + if err != nil { + glog.Warningf("Could not retrieve entitlement secret: %s", err) + return + } + + // Add the key to the file list + if key, ok := entitlements.Data["entitlement-key.pem"]; ok { + ignConfig.Storage.Files = append(ignConfig.Storage.Files, NewIgnFile("/etc/pki/entitlement/entitlement-key.pem", string(key))) + } + + // Add the public key to the file list + if pub, ok := entitlements.Data["entitlement.pem"]; ok { + ignConfig.Storage.Files = append(ignConfig.Storage.Files, NewIgnFile("/etc/pki/entitlement/entitlement.pem", string(pub))) + } + + // Now it's a machineconfig + mc, err := ctrlcommon.MachineConfigFromIgnConfig(pool.Name, entitledConfigName, ignConfig) + if err != nil { + glog.Warningf("Could not create machineconfig for entitlements: %s", err) + } + + // Add it to the list for this pool + _, err = ctrl.client.MachineconfigurationV1().MachineConfigs().Create(context.TODO(), mc, metav1.CreateOptions{}) + if err != nil { + glog.Warningf("Failed to add entitlements to layered pool: %s", err) + } + } + +} + +// annotatePoolWithNewestImage looks in the corresponding image stream for a pool and annotates the name of the image, and it's +// corresponding rendered-config, which it retrieves from the image's docker metadata labels that we added during our build +func (ctrl *Controller) annotatePoolWithNewestImage(imageStream *imagev1.ImageStream, pool *mcfgv1.MachineConfigPool) error { + + // We don't want to crash if these are empty + if pool.Annotations == nil { + pool.Annotations = map[string]string{} + } + + // Grab the latest tag from the imagestream. 
If we don't have one, nothing happens + for _, tag := range imageStream.Status.Tags { + if len(tag.Items) == 0 { + continue + } + + // I might have an older image that has right machine config content, but some + // other content might have changed (like, I dunno, base image) so we shouldn't go back + // to older images + image := tag.Items[0] + + // If this is different than our current tag, grab it and annotate the pool + glog.Infof("imagestream %s newest is: %s (%s)", imageStream.Name, image.DockerImageReference, image.Image) + if pool.Spec.Configuration.Name == image.Image { + // We're already theer, don't touch it + return nil + } + + // get the actual image so we can read its labels + fullImage, err := ctrl.imageclient.ImageV1().Images().Get(context.TODO(), image.Image, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("Could not retrieve image %s: %w", image.Image, err) + } + + // We need the labels out of the docker image but it's a raw extension + dockerLabels := struct { + Config struct { + Labels map[string]string `json:"Labels"` + } `json:"Config"` + }{} + + // Get the labels out and see what config this is + err = json.Unmarshal(fullImage.DockerImageMetadata.Raw, &dockerLabels) + if err != nil { + return fmt.Errorf("Could not get labels from docker image metadata: %w", err) + } + + // Tag what config this came from so we know it's the right image + if machineconfig, ok := dockerLabels.Config.Labels["machineconfig"]; ok { + pool.Annotations[ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey] = machineconfig + pool.Spec.Configuration.ObjectReference = corev1.ObjectReference{ + Kind: "Image", + Name: image.Image, + } + // TODO(jkyros): Kind of cheating using this as metadata showback for the user until we figure out our "level triggering" strategy + pool.Spec.Configuration.Source = []corev1.ObjectReference{ + // What machine config was the assigned image build using + {Kind: "MachineConfig", Name: machineconfig, Namespace: ctrlcommon.MCONamespace}, + // What imagestream did it come out of + {Kind: "ImageStream", Name: imageStream.Name, Namespace: ctrlcommon.MCONamespace}, + // The non-sha image reference just for convenience + {Kind: "DockerImageReference", Name: image.DockerImageReference, Namespace: ctrlcommon.MCONamespace}, + } + + } + + // TODO(jkyros): Probably need to go through our eventing "state machine" to make sure our steps make sense + ctrl.eventRecorder.Event(pool, corev1.EventTypeNormal, "Updated", "Moved pool "+pool.Name+" to layered image "+image.DockerImageReference) + + } + + return nil +} + +func (ctrl *Controller) CreateBuildConfigForImageStream(pool *mcfgv1.MachineConfigPool, buildConfigName, sourceImageStreamName string, targetImageStream *imagev1.ImageStream, dockerFile string, triggerOnImageTags ...string) (*buildv1.BuildConfig, error) { + // Construct a buildconfig for this pool if it doesn't exist + + skipLayers := buildv1.ImageOptimizationSkipLayers + buildConfig := &buildv1.BuildConfig{ + + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: buildConfigName, + Namespace: ctrlcommon.MCONamespace, + Annotations: map[string]string{ + "machineconfiguration.openshift.io/pool": pool.Name, + }, + }, + Spec: buildv1.BuildConfigSpec{ + RunPolicy: "Serial", + // Simple dockerfile build, just the text from the dockerfile + CommonSpec: buildv1.CommonSpec{ + Source: buildv1.BuildSource{ + Type: "Dockerfile", + Dockerfile: &dockerFile, + }, + Strategy: buildv1.BuildStrategy{ + DockerStrategy: 
&buildv1.DockerBuildStrategy{ + // This will override the last FROM in our builds, but we want that + From: &corev1.ObjectReference{ + Kind: "ImageStreamTag", + Name: sourceImageStreamName + ":latest", + }, + // Squashing layers is good as long as it doesn't cause problems with what + // the users want to do. It says "some syntax is not supported" + ImageOptimizationPolicy: &skipLayers, + }, + Type: "Docker", + }, + // Output to the imagestreams we made before + Output: buildv1.BuildOutput{ + To: &corev1.ObjectReference{ + Kind: "ImageStreamTag", + Name: targetImageStream.Name + ":latest", + }, + ImageLabels: []buildv1.ImageLabel{ + // The pool that this image was built for + {Name: "io.openshift.machineconfig.pool", Value: pool.Name}, + }, + // TODO(jkyros): I want to label these images with which rendered config they were built from + // but there doesn't seem to be a way to get it in there easily + }, + }, + + Triggers: []buildv1.BuildTriggerPolicy{ + { + // This blank one signifies "just trigger on the from image specified in the strategy" + Type: "ImageChange", + ImageChange: &buildv1.ImageChangeTrigger{}, + }, + }, + }, + } + + // Pause the custom build config by default so it doesn't build automatically unless we enable it + if buildConfigName == pool.Name+"-build"+ctrlcommon.ImageStreamSuffixMCOContentCustom { + buildConfig.Spec.Triggers[0].ImageChange.Paused = true + } + + // TODO(jkyros): pull this out if we handle these triggers ourselves, because we might need the control + // If additional triggers, add them to the config + + for _, tag := range triggerOnImageTags { + buildConfig.Spec.Triggers = append(buildConfig.Spec.Triggers, buildv1.BuildTriggerPolicy{ + Type: "ImageChange", + ImageChange: &buildv1.ImageChangeTrigger{ + LastTriggeredImageID: "", + From: &corev1.ObjectReference{ + Kind: "ImageStreamTag", + Name: tag, + }, + }, + }) + + } + + // Set the owner references so these get cleaned up if the pool gets deleted + poolKind := mcfgv1.SchemeGroupVersion.WithKind("MachineConfigPool") + oref := metav1.NewControllerRef(pool, poolKind) + buildConfig.SetOwnerReferences([]metav1.OwnerReference{*oref}) + + // Create the buildconfig + return ctrl.buildclient.BuildV1().BuildConfigs(ctrlcommon.MCONamespace).Create(context.TODO(), buildConfig, metav1.CreateOptions{}) + +} + +// TODO(jkyros): don't leave this here, expose it properly if you're gonna use it +// StrToPtr returns a pointer to a string +func StrToPtr(s string) *string { + return &s +} + +// TODO(jkyros): don't leave this here, expose it properly if you're gonna use it +// NewIgnFile returns a simple ignition3 file from just path and file contents +func NewIgnFile(path, contents string) ign3types.File { + return ign3types.File{ + Node: ign3types.Node{ + Path: path, + }, + FileEmbedded1: ign3types.FileEmbedded1{ + Contents: ign3types.Resource{ + Source: StrToPtr(dataurl.EncodeBytes([]byte(contents)))}, + }, + } +} + +// TODO(jkyros): some quick functions to go with our image stream informer so we can watch imagestream update +func (ctrl *Controller) resolveControllerRef(controllerRef *metav1.OwnerReference) *mcfgv1.MachineConfigPool { + // We can't look up by UID, so look up by Name and then verify UID. + // Don't even try to look up by Name if it's the wrong Kind. 
+ if controllerRef.Kind != controllerKind.Kind { + return nil + } + pool, err := ctrl.mcpLister.Get(controllerRef.Name) + if err != nil { + return nil + } + + if pool.UID != controllerRef.UID { + // The controller we found with this Name is not the same one that the + // ControllerRef points to. + return nil + } + return pool +} + +//nolint:unparam +func (ctrl *Controller) getMostRecentImageTagForImageStream(poolImageStream *imagev1.ImageStream, desiredTag string) *imagev1.TagEvent { + // Get the most recent image + for _, tag := range poolImageStream.Status.Tags { + if tag.Tag == desiredTag { + // TODO(jkyros): don't crash if this is empty + if len(tag.Items) > 0 { + return &tag.Items[0] + } + } + } + return nil +} + +func (ctrl *Controller) syncAvailableStatus(pool *mcfgv1.MachineConfigPool) error { + if mcfgv1.IsMachineConfigPoolConditionFalse(pool.Status.Conditions, mcfgv1.MachineConfigPoolRenderDegraded) { + return nil + } + sdegraded := mcfgv1.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolRenderDegraded, corev1.ConditionFalse, "", "") + mcfgv1.SetMachineConfigPoolCondition(&pool.Status, *sdegraded) + if _, err := ctrl.client.MachineconfigurationV1().MachineConfigPools().UpdateStatus(context.TODO(), pool, metav1.UpdateOptions{}); err != nil { + return err + } + return nil +} + +func (ctrl *Controller) syncFailingStatus(pool *mcfgv1.MachineConfigPool, err error) error { + sdegraded := mcfgv1.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolRenderDegraded, corev1.ConditionTrue, "", fmt.Sprintf("Failed to render configuration for pool %s: %v", pool.Name, err)) + mcfgv1.SetMachineConfigPoolCondition(&pool.Status, *sdegraded) + if _, updateErr := ctrl.client.MachineconfigurationV1().MachineConfigPools().UpdateStatus(context.TODO(), pool, metav1.UpdateOptions{}); updateErr != nil { + glog.Errorf("Error updating MachineConfigPool %s: %v", pool.Name, updateErr) + } + return err +} + +func (ctrl *Controller) updateBuildConfigWithLabels(buildConfig *buildv1.BuildConfig, labels map[string]string) (*buildv1.BuildConfig, error) { + + newBuildConfig := buildConfig.DeepCopy() + for labelKey, labelValue := range labels { + il := buildv1.ImageLabel{Name: labelKey, Value: labelValue} + newBuildConfig.Spec.Output.ImageLabels = append(newBuildConfig.Spec.Output.ImageLabels, il) + } + + return ctrl.buildclient.BuildV1().BuildConfigs(ctrlcommon.MCONamespace).Update(context.TODO(), newBuildConfig, metav1.UpdateOptions{}) +} + +// ensureCoreOSImageStream creates the base CoreOS imagestream that is owned by no pool and serves as the default source of the +// base images for the layered pools' base image streams +func (ctrl *Controller) ensureCoreOSImageStream() (*imagev1.ImageStream, error) { + + checkImageStream, err := ctrl.isLister.ImageStreams(ctrlcommon.MCONamespace).Get(ctrlcommon.CoreOSImageStreamName) + if apierrors.IsNotFound(err) { + controllerConfig, err := ctrl.ccLister.Get(ctrlcommon.ControllerConfigName) + if err != nil { + return nil, fmt.Errorf("could not get ControllerConfig %w", err) + } + + newImageStream := &imagev1.ImageStream{ + ObjectMeta: metav1.ObjectMeta{ + Name: ctrlcommon.CoreOSImageStreamName, + Namespace: ctrlcommon.MCONamespace, + }, + Spec: imagev1.ImageStreamSpec{ + LookupPolicy: imagev1.ImageLookupPolicy{Local: false}, + DockerImageRepository: "", + Tags: []imagev1.TagReference{ + { + Name: "latest", + From: &corev1.ObjectReference{ + Kind: "DockerImage", + Name: controllerConfig.Spec.BaseOperatingSystemContainer, + }, + }, + }, + }, + } + checkImageStream, err = 
ctrl.imageclient.ImageV1().ImageStreams(ctrlcommon.MCONamespace).Create(context.TODO(), newImageStream, metav1.CreateOptions{}) + if err != nil { + return nil, fmt.Errorf("Attempted to create ImageStream %s but failed: %w", newImageStream, err) + } + glog.Infof("Created image stream %s", ctrlcommon.CoreOSImageStreamName) + } else if err != nil { + return nil, err + } + + return checkImageStream, nil + +} + +func (ctrl *Controller) ensureImageStreamForPool(pool *mcfgv1.MachineConfigPool, imageStreamName string, pbr *PoolResourceNames) (*imagev1.ImageStream, error) { + // Check to see if we have the imagestream already + checkImageStream, err := ctrl.isLister.ImageStreams(ctrlcommon.MCONamespace).Get(imageStreamName) + if apierrors.IsNotFound(err) { + // Create the imagestream if it doesn't already exist + // It doesn't exist, so we need to make it, otherwise our builds will fail + newImageStream := &imagev1.ImageStream{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "machineconfiguration.openshift.io/pool": pool.Name, + }, + }, + } + newImageStream.Name = imageStreamName + newImageStream.Namespace = ctrlcommon.MCONamespace + newImageStream.Spec.LookupPolicy.Local = false + + // Set ownerships so these get cleaned up if we delete the pool + // TODO(jkyros): I have no idea if this actually cleans the images out of the stream if we delete it? + poolKind := mcfgv1.SchemeGroupVersion.WithKind("MachineConfigPool") + oref := metav1.NewControllerRef(pool, poolKind) + newImageStream.SetOwnerReferences([]metav1.OwnerReference{*oref}) + + // coreos imagestream is base, it's special, it needs to pull that image + if imageStreamName == pbr.ImageStream.Base { + + newImageStream.Spec = imagev1.ImageStreamSpec{ + LookupPolicy: imagev1.ImageLookupPolicy{Local: false}, + DockerImageRepository: "", + Tags: []imagev1.TagReference{ + { + Name: "latest", + From: &corev1.ObjectReference{ + Kind: "DockerImage", + Name: "image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/coreos", + }, + }, + }, + } + + } + + // TODO(jkyros): your data structure for this is clearly inelegant, fix it + if imageStreamName == pbr.ImageStream.Base || imageStreamName == pbr.ImageStream.RenderedConfig { + newImageStream.Annotations["machineconfig.openshift.io/buildconfig"] = pbr.BuildConfig.Content.Name + } + if imageStreamName == pbr.ImageStream.Content { + newImageStream.Annotations["machineconfig.openshift.io/buildconfig"] = pbr.BuildConfig.CustomContent.Name + } + + // It didn't exist, put the imagestream in the cluster + checkImageStream, err = ctrl.imageclient.ImageV1().ImageStreams(ctrlcommon.MCONamespace).Create(context.TODO(), newImageStream, metav1.CreateOptions{}) + if err != nil { + return nil, fmt.Errorf("Attempted to create ImageStream %s but failed: %w", newImageStream, err) + } + glog.Infof("Created image stream %s", imageStreamName) + } + return checkImageStream, nil +} + +func (ctrl *Controller) ensureBuildConfigForPool(pool *mcfgv1.MachineConfigPool, pbc *PoolBuildConfig) (*buildv1.BuildConfig, error) { + checkBuildConfig, err := ctrl.bcLister.BuildConfigs(ctrlcommon.MCONamespace).Get(pbc.Name) + if apierrors.IsNotFound(err) { + + // We are making this buildconfig owned by the imagestream it's building to + // TODO(jkyros): I really do feel like the buildconfig belongs to the stream because it populates the stream + ownerStream, err := ctrl.isLister.ImageStreams(ctrlcommon.MCONamespace).Get(pbc.Target) + if err != nil { + return nil, fmt.Errorf("Failed to retrieve 
owner imagestream: %w", err) + } + // Make the build since it doesn't exist, and set checkBuildConfig so we can use it below + checkBuildConfig, err = ctrl.CreateBuildConfigForImageStream(pool, pbc.Name, pbc.Source, ownerStream, pbc.DockerfileContent, pbc.TriggeredByStreams...) + if err != nil { + return nil, err + } + glog.Infof("BuildConfig %s has been created for pool %s", pbc.Name, pool.Name) + } else if err != nil { + // some other error happened + return nil, err + } + return checkBuildConfig, nil +} + +func (ctrl *Controller) enqueuePoolIfBuildProblems(build *buildv1.Build) { + // If it's in a good state, save the suffering and move on + if isGoodBuildPhase(build.Status.Phase) { + return + } + + // TODO(jkyros): sequester this in a function somewhere + + // If it's in a bad phase, our pool might care if it's one of ours + + // See who owns the build + controllerRef := metav1.GetControllerOf(build) + + // If the build is owned by a buildconfig, see if it's one of ours + if controllerRef.Kind == "BuildConfig" { + buildConfig, err := ctrl.bcLister.BuildConfigs(ctrlcommon.MCONamespace).Get(controllerRef.Name) + if err != nil { + glog.Errorf("Failed to retrieve controlling buildconfig %s for build %s: %s", controllerRef.Name, build.Name, err) + } + + // See if the buildconfig is controlled by our pool + buildConfigControllerRef := metav1.GetControllerOf(buildConfig) + if controllerRef != nil { + pool := ctrl.resolveControllerRef(buildConfigControllerRef) + // If it is our pool, then enqueue it + if pool != nil { + ctrl.enqueueMachineConfigPool(pool) + } + + } + + } +} + +// isGoodBuildPhase determines whether a build is okay, or if it had a problem that we potentially need to take action on. This is used to decide +// whether or not re-queue a machineconfig pool to check on its builds if the build came from one of its build controllers. +func isGoodBuildPhase(buildPhase buildv1.BuildPhase) bool { + + if buildPhase != buildv1.BuildPhaseFailed && buildPhase != buildv1.BuildPhaseCancelled && buildPhase != buildv1.BuildPhaseError { + return true + } + return false +} + +func (ctrl *Controller) getLabelsForImageRef(imageRef string) (map[string]string, error) { + fullImage, err := ctrl.imageclient.ImageV1().Images().Get(context.TODO(), imageRef, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("Could not retrieve image %s: %w", imageRef, err) + } + + // We need the labels out of the docker image but it's a raw extension + dockerLabels := struct { + Config struct { + Labels map[string]string `json:"Labels"` + } `json:"Config"` + }{} + + // Get the labels out and see what config this is + err = json.Unmarshal(fullImage.DockerImageMetadata.Raw, &dockerLabels) + if err != nil { + return nil, fmt.Errorf("Could not get labels from docker image metadata: %w", err) + } + return dockerLabels.Config.Labels, nil +} + +// ensureImageStreamPrecedenceIfPopulated tries to make the UX cleaner by automatically switching the pool to use custom/external imagestreams +// if it looks like the user has populated them. It will switch back if those imagestreams get cleared out. This is really just to save the user from +// having to update the pool annotations themselves. 
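+// The effective precedence (highest first), using the suffix constants from
+// controller/common, is:
+//
+//	<pool>-mco-content-external  when it has a "latest" image
+//	<pool>-mco-content-custom    when it has a "latest" image
+//	<pool>-mco-content           otherwise (the default MCO-built content)
+//
+// and, independently, anything tagged into <pool>-external-base is tagged over the
+// pool's <pool>-base stream in place of the shared coreos image.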
+func (ctrl *Controller) ensureImageStreamPrecedenceIfPopulated(pool *mcfgv1.MachineConfigPool) error { + glog.Infof("Ensuring imagestreams are populated for %s", pool.Name) + // Get the list of what resources should exist for this pool + pbr := PoolBuildResources(pool) + + // Get the imagestream object for the external base imagestream + coreosImageStream, err := ctrl.isLister.ImageStreams(ctrlcommon.MCONamespace).Get(ctrlcommon.CoreOSImageStreamName) + if err != nil { + return err + } + + // Get the imagestream object for the external base imagestream + baseImageStream, err := ctrl.isLister.ImageStreams(ctrlcommon.MCONamespace).Get(pbr.ImageStream.Base) + if err != nil { + return err + } + + // Get the imagestream object for the external base imagestream + externalBaseImageStream, err := ctrl.isLister.ImageStreams(ctrlcommon.MCONamespace).Get(pbr.ImageStream.ExternalBase) + if err != nil { + return err + } + + // Get the imagestream object for the external imagestream + externalImageStream, err := ctrl.isLister.ImageStreams(ctrlcommon.MCONamespace).Get(pbr.ImageStream.External) + if err != nil { + return err + } + + // Get the imagestream objects for the custom imagestream, too + customImageStream, err := ctrl.isLister.ImageStreams(ctrlcommon.MCONamespace).Get(pbr.ImageStream.CustomContent) + if err != nil { + return err + } + + // Retrieve the name of the imagestream we're currently using + poolImageStreamName, _ := ctrlcommon.GetPoolImageStream(pool) + + // This is the place where we set the pool image stream if it's not set, so it's not an error here + if poolImageStreamName == "" { + ctrlcommon.SetPoolImageStream(pool, pbr.ImageStream.Content) + } + + // Get the latest tag from external base + latestExternalBaseImageTag := ctrl.getMostRecentImageTagForImageStream(externalBaseImageStream, "latest") + latestBaseImageTag := ctrl.getMostRecentImageTagForImageStream(baseImageStream, "latest") + latestCoreOSImageTag := ctrl.getMostRecentImageTagForImageStream(coreosImageStream, "latest") + + // If there is something in external base, we need to tag it into our base + if latestExternalBaseImageTag != nil { + + if latestBaseImageTag == nil || latestBaseImageTag.Image != latestExternalBaseImageTag.Image { + if latestBaseImageTag != nil { + glog.Infof("Latest base: %s Latest external: %s", latestBaseImageTag.Image, latestExternalBaseImageTag.Image) + } else { + glog.Infof("Latest base image tag was empty, assigning external") + } + err := ctrl.tagImageIntoStream(externalBaseImageStream.Name, baseImageStream.Name, latestExternalBaseImageTag.Image, "latest") + if err != nil { + return err + } + } + } else { + // If there is nothing in external base, we should use coreos as our base + if latestBaseImageTag == nil { + if latestCoreOSImageTag == nil { + return fmt.Errorf("we don't have a CoreOS image yet -- probably still downloading, need to wait") + } + } else { + glog.Infof("Latest base: %s Latest coreos: %s", latestBaseImageTag.Image, latestCoreOSImageTag.Image) + + // If what we have is different than what coreos has, we should use what coreos has instead + if latestBaseImageTag.Image != latestCoreOSImageTag.Image { + err := ctrl.tagImageIntoStream(coreosImageStream.Name, baseImageStream.Name, latestCoreOSImageTag.Image, "latest") + if err != nil { + return err + } + } + } + + } + + // If we aren't using the external image stream, and it is populated, we should switch to it + if poolImageStreamName != externalImageStream.Name && ctrl.getMostRecentImageTagForImageStream(externalImageStream, 
"latest") != nil { + // TODO(jkyros): Technically I event here before the update happens down below later, that seems dishonest for the user since + // at this point what I say happened hasn't happened yet + ctrl.eventRecorder.Event(pool, corev1.EventTypeNormal, "ImageStreamChange", "Image stream for pool "+pool.Name+" changed to "+externalImageStream.Name+ + " because it takes precedence and an image is present in it") + ctrlcommon.SetPoolImageStream(pool, pbr.ImageStream.External) + + // External isn't populated, see if we shuold fall back to custom if it has an image or an updated buildconfig + } else if poolImageStreamName != customImageStream.Name && ctrl.getMostRecentImageTagForImageStream(customImageStream, "latest") != nil { + ctrl.eventRecorder.Event(pool, corev1.EventTypeNormal, "ImageStreamChange", "Image stream for pool "+pool.Name+" changed to "+customImageStream.Name+ + " because it takes precedence and an image is present in it") + ctrlcommon.SetPoolImageStream(pool, pbr.ImageStream.CustomContent) + + } else if poolImageStreamName != pbr.ImageStream.Content { + // If we didn't catch one of the previous if blocks, we should be using the default MCO content stream. This lets us fall back + // if/when someone cleans out or deletes one of the imagestreams + // TODO(jkyros): This self-healing behavior does keep people from assigning arbitrary imagstreams (whether that's good or + // bad is up to us) + ctrl.eventRecorder.Event(pool, corev1.EventTypeNormal, "ImageStreamChange", "Image stream for pool "+pool.Name+" falling back to "+pbr.ImageStream.Content+ + " as others are unpopulated") + ctrlcommon.SetPoolImageStream(pool, pbr.ImageStream.Content) + } + return nil +} + +func (ctrl *Controller) tagImageIntoStream(sourceImageStreamName, targetImageStreamName, imageName, tagName string) error { + + var internalRegistry = "image-registry.openshift-image-registry.svc:5000/" + fullTargetTagName := targetImageStreamName + ":" + tagName + // If you don't get the namespace prefix on there, it tries to pull it from docker.io and fails + fullSourceName := internalRegistry + ctrlcommon.MCONamespace + "/" + sourceImageStreamName + "@" + imageName + + var tag *imagev1.ImageStreamTag + tag, err := ctrl.imageclient.ImageV1().ImageStreamTags(ctrlcommon.MCONamespace).Get(context.TODO(), fullTargetTagName, metav1.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + + it := &imagev1.ImageStreamTag{ + + ObjectMeta: metav1.ObjectMeta{ + Name: fullTargetTagName, + Namespace: ctrlcommon.MCONamespace, + }, + Tag: &imagev1.TagReference{ + Name: tagName, + From: &corev1.ObjectReference{ + Kind: "ImageStreamImage", + Namespace: ctrlcommon.MCONamespace, + Name: fullSourceName, + }, + ReferencePolicy: imagev1.TagReferencePolicy{ + Type: imagev1.SourceTagReferencePolicy, + }, + }, + } + glog.Infof("Tagging image %s from %s into imagestream %s", imageName+":"+tagName, sourceImageStreamName, targetImageStreamName) + _, err = ctrl.imageclient.ImageV1().ImageStreamTags(ctrlcommon.MCONamespace).Create(context.TODO(), it, metav1.CreateOptions{}) + return err + + } + return err + + } + tag.Tag.From.Name = fullSourceName + glog.Infof("Updating image tag %s from %s into imagestream %s", imageName+":"+tagName, sourceImageStreamName, targetImageStreamName) + + _, err = ctrl.imageclient.ImageV1().ImageStreamTags(ctrlcommon.MCONamespace).Update(context.TODO(), tag, metav1.UpdateOptions{}) + return err + +} + +func (ctrl *Controller) cheatMachineConfigLabelIntoBuildConfig(imageStream *imagev1.ImageStream, pool 
*mcfgv1.MachineConfigPool) error { + // This is the mco content imagestream + latestImageTag := ctrl.getMostRecentImageTagForImageStream(imageStream, "latest") + if latestImageTag == nil { + return fmt.Errorf("No 'latest' image tag in imagestream %s: ", imageStream.Name) + + } + labels, err := ctrl.getLabelsForImageRef(latestImageTag.Image) + if err != nil { + return fmt.Errorf("Failed to retrieve labels for imagestream tag %s: %w", latestImageTag.DockerImageReference, err) + } + + buildConfig, err := ctrl.bcLister.BuildConfigs(ctrlcommon.MCONamespace).Get(pool.Name + "-build" + ctrlcommon.ImageStreamSuffixMCOContent) + if err != nil { + return fmt.Errorf("Failed to retrieve corresponding buildconfig: %w", err) + } + + // Get buildconfig + _, err = ctrl.updateBuildConfigWithLabels(buildConfig, labels) + if err != nil { + return fmt.Errorf("Failed to update buildconfig %s with labels: %w", buildConfig.Name, err) + } + + return nil +} diff --git a/pkg/controller/build/pool_build_resources.go b/pkg/controller/build/pool_build_resources.go index 1d1fdd95bc..881d21b696 100644 --- a/pkg/controller/build/pool_build_resources.go +++ b/pkg/controller/build/pool_build_resources.go @@ -19,12 +19,20 @@ type PoolResourceNames struct { } type PoolImageStreamList struct { - Base string + // Base image for all of our builds + Base string + // Base image supplied externally, takes precedence over default CoreOS stream + ExternalBase string + // Where the render controller renders its tiny config image RenderedConfig string - Content string - CustomContent string - External string - PerNode string + // Where the MCO outputs its multi-stage build with machineconfig to + Content string + // Where the image goes if a user uses the custom buildconfig + CustomContent string + // Hypothetically if you built a working image outside the cluster, you would tag it here + External string + // Where we look for a per-node config image + PerNode string } type PoolBuildConfigList struct { @@ -44,6 +52,7 @@ func PoolBuildResources(pool *mcfgv1.MachineConfigPool) *PoolResourceNames { pisl := PoolImageStreamList{ Base: pool.Name + ctrlcommon.ImageStreamSuffixCoreOS, + ExternalBase: pool.Name + ctrlcommon.ImageStreamSuffixExternalBase, RenderedConfig: pool.Name + ctrlcommon.ImageStreamSuffixRenderedConfig, Content: pool.Name + ctrlcommon.ImageStreamSuffixMCOContent, CustomContent: pool.Name + ctrlcommon.ImageStreamSuffixMCOContentCustom, From 010a23a1efd45331468bb7db75e2e5780aa8efed Mon Sep 17 00:00:00 2001 From: John Kyros <79665180+jkyros@users.noreply.github.com> Date: Tue, 19 Apr 2022 18:40:34 -0500 Subject: [PATCH 3/9] controller/common: image stream helpers and consts This adds helpers for getting and setting the current image stream for a pool, separates the per-pool suffix '-base' from the global 'coreos' imagestream, and adds a per-node stream name constant for later. 
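
A minimal sketch of how these helpers can be consumed (the build controller's
actual call sites differ slightly):

    poolStream, err := ctrlcommon.GetPoolImageStream(pool)
    if err != nil {
        // Nothing selected yet: fall back to the pool's default MCO content stream.
        poolStream = pool.Name + ctrlcommon.ImageStreamSuffixMCOContent
        ctrlcommon.SetPoolImageStream(pool, poolStream)
    }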
--- pkg/controller/common/constants.go | 31 ++++++++++++++++++++++++++++++ pkg/controller/common/helpers.go | 11 +++++++++++ 2 files changed, 42 insertions(+) diff --git a/pkg/controller/common/constants.go b/pkg/controller/common/constants.go index 12177c3618..c34993c232 100644 --- a/pkg/controller/common/constants.go +++ b/pkg/controller/common/constants.go @@ -41,4 +41,35 @@ const ( MachineConfigPoolMaster = "master" // MachineConfigPoolWorker is the MachineConfigPool name given to the worker MachineConfigPoolWorker = "worker" + + // ExperimentalLayeringPoolLabel is the label that enables the "layered" workflow path for a pool + ExperimentalLayeringPoolLabel = "machineconfiguration.openshift.io/layered" + + // ExperimentalLayeringPoolImageStreamLabel is the label that tells the pool which imagestream to grab images out of + ExperimentalLayeringPoolImageStreamLabel = "machineconfiguration.openshift.io/selected-image-stream" + // ExperimentalNewestLayeredImageAnnotationKey is the annotation that signifies the newest image that has been pushed to a machine + // config pool's imagestream + ExperimentalNewestLayeredImageAnnotationKey = "machineconfiguration.openshift.io/newestImage" + // ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey is the annotation that signifies which rendered config + // the latest image in the pool's imagestream is equivalent to + ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey = "machineconfiguration.openshift.io/newestImageEquivalentConfig" + + // CoreOSImageStreamName is the name of the global (non-pool-specific) default base image stream that pools will draw from if they + // do not have any other base image specified + CoreOSImageStreamName = "coreos" + + // ImageStreamSuffixCoreOS is the suffix for the imagestream holding the base coreos image + ImageStreamSuffixCoreOS = "-base" + // ImageStreamSuffixExternalBase is the suffix for a pool imagestream where the base image comes from outside the cluster + ImageStreamSuffixExternalBase = "-external-base" + // ImageStreamSuffixRenderedConfig is the suffix for a pool imagestream where the mco also writes its rendered machineconfig + ImageStreamSuffixRenderedConfig = "-rendered-config" + // ImageStreamSuffixMCOContent is the suffix for a pool imagestream where the mco has applied its rendered-config to the base image + ImageStreamSuffixMCOContent = "-mco-content" + // ImageStreamSuffixMCOContentCustom is the suffix for a pool imagestream where content has been customized by a user + ImageStreamSuffixMCOContentCustom = "-mco-content-custom" + // ImageStreamSuffixMCOContentExternal is the suffix for a pool imagestream where content comes from outside the cluster + ImageStreamSuffixMCOContentExternal = "-mco-content-external" + // ImageStreamSuffixMCOContentPerNode is the suffix for a pool imagestream containing any per-node images + ImageStreamSuffixMCOContentPerNode = "-mco-content-per-node" ) diff --git a/pkg/controller/common/helpers.go b/pkg/controller/common/helpers.go index ff348f24e7..d5c4d2dc18 100644 --- a/pkg/controller/common/helpers.go +++ b/pkg/controller/common/helpers.go @@ -1170,3 +1170,14 @@ func (n namespacedEventRecorder) Eventf(object runtime.Object, eventtype, reason func (n namespacedEventRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { n.delegate.AnnotatedEventf(ensureEventNamespace(object), annotations, eventtype, reason, messageFmt, args...)
} + +func GetPoolImageStream(pool *mcfgv1.MachineConfigPool) (string, error) { + if imagestream, ok := pool.Labels[ExperimentalLayeringPoolImageStreamLabel]; ok { + return imagestream, nil + } + return "", fmt.Errorf("No ImageStream found for pool %s", pool.Name) +} + +func SetPoolImageStream(pool *mcfgv1.MachineConfigPool, imageStreamName string) { + pool.Labels[ExperimentalLayeringPoolImageStreamLabel] = imageStreamName +} From d27c3dfc4bc8cbb2decb0a6e04c796459f114f9b Mon Sep 17 00:00:00 2001 From: John Kyros <79665180+jkyros@users.noreply.github.com> Date: Fri, 18 Mar 2022 00:43:45 -0500 Subject: [PATCH 4/9] Adds some constants for layering and a helper. I don't know that we'll ultimately end up with imagestreams with these names, but for now these are the ones we think we want, and each has a purpose behind it. --- pkg/controller/common/constants.go | 3 +++ pkg/controller/common/helpers.go | 7 +++++++ 2 files changed, 10 insertions(+) diff --git a/pkg/controller/common/constants.go b/pkg/controller/common/constants.go index c34993c232..864871fa84 100644 --- a/pkg/controller/common/constants.go +++ b/pkg/controller/common/constants.go @@ -47,6 +47,7 @@ const ( // ExperimentalLayeringPoolImageStreamLabel is the label that tells the pool which imagestream to grab images out of ExperimentalLayeringPoolImageStreamLabel = "machineconfiguration.openshift.io/selected-image-stream" + // ExperimentalNewestLayeredImageAnnotationKey is the annotation that signifies the newest image that has been pushed to a machine // config pool's imagestream ExperimentalNewestLayeredImageAnnotationKey = "machineconfiguration.openshift.io/newestImage" @@ -62,6 +63,7 @@ const ( ImageStreamSuffixCoreOS = "-base" // ImageStreamSuffixExternalBase is the suffix for a pool imagestream where the base image comes from outside the cluster ImageStreamSuffixExternalBase = "-external-base" + // ImageStreamSuffixRenderedConfig is the suffix for a pool imagestream where the mco also writes its rendered machineconfig ImageStreamSuffixRenderedConfig = "-rendered-config" // ImageStreamSuffixMCOContent is the suffix for a pool imagestream where the mco has applied its rendered-config to the base image @@ -70,6 +72,7 @@ const ( ImageStreamSuffixMCOContentCustom = "-mco-content-custom" // ImageStreamSuffixMCOContentExternal is the suffix for a pool imagestream where content comes from outside the cluster ImageStreamSuffixMCOContentExternal = "-mco-content-external" + // ImageStreamSuffixMCOContentPerNode is the suffix for a pool imagestream containing any per-node images ImageStreamSuffixMCOContentPerNode = "-mco-content-per-node" ) diff --git a/pkg/controller/common/helpers.go b/pkg/controller/common/helpers.go index d5c4d2dc18..f5b9c6eca6 100644 --- a/pkg/controller/common/helpers.go +++ b/pkg/controller/common/helpers.go @@ -1181,3 +1181,10 @@ func GetPoolImageStream(pool *mcfgv1.MachineConfigPool) (string, error) { func SetPoolImageStream(pool *mcfgv1.MachineConfigPool, imageStreamName string) { pool.Labels[ExperimentalLayeringPoolImageStreamLabel] = imageStreamName } + +func IsLayeredPool(pool *mcfgv1.MachineConfigPool) bool { + if _, ok := pool.Labels[ExperimentalLayeringPoolLabel]; ok { + return true + } + return false +} From c20970c51853aedabbf5e6766e810d7c49a9775d Mon Sep 17 00:00:00 2001 From: Zack Zlotnik Date: Thu, 6 Apr 2023 16:17:16 -0400 Subject: [PATCH 5/9] we no longer have 'legacy' OS containers --- pkg/controller/build/build_controller.go | 2 +- 1 file changed, 1 insertion(+), 1
deletion(-) diff --git a/pkg/controller/build/build_controller.go b/pkg/controller/build/build_controller.go index 1059950558..6b4b6ae170 100644 --- a/pkg/controller/build/build_controller.go +++ b/pkg/controller/build/build_controller.go @@ -983,7 +983,7 @@ func (ctrl *Controller) ensureCoreOSImageStream() (*imagev1.ImageStream, error) Name: "latest", From: &corev1.ObjectReference{ Kind: "DockerImage", - Name: controllerConfig.Spec.BaseOperatingSystemContainer, + Name: controllerConfig.Spec.OSImageURL, }, }, }, From a2cf4589c4529cf5050cdc51276b77aff40ff74a Mon Sep 17 00:00:00 2001 From: Zack Zlotnik Date: Thu, 6 Apr 2023 16:01:21 -0400 Subject: [PATCH 6/9] vendor deps --- .../build/clientset/versioned/clientset.go | 105 ++++++++++ .../build/clientset/versioned/doc.go | 4 + .../versioned/fake/clientset_generated.go | 69 ++++++ .../build/clientset/versioned/fake/doc.go | 4 + .../clientset/versioned/fake/register.go | 40 ++++ .../versioned/typed/build/v1/fake/doc.go | 4 + .../typed/build/v1/fake/fake_build.go | 196 ++++++++++++++++++ .../typed/build/v1/fake/fake_build_client.go | 28 +++ .../typed/build/v1/fake/fake_buildconfig.go | 185 +++++++++++++++++ .../externalversions/build/interface.go | 30 +++ .../externalversions/build/v1/build.go | 74 +++++++ .../externalversions/build/v1/buildconfig.go | 74 +++++++ .../externalversions/build/v1/interface.go | 36 ++++ .../informers/externalversions/factory.go | 164 +++++++++++++++ .../informers/externalversions/generic.go | 48 +++++ .../internalinterfaces/factory_interfaces.go | 24 +++ .../client-go/build/listers/build/v1/build.go | 83 ++++++++ .../build/listers/build/v1/buildconfig.go | 83 ++++++++ .../listers/build/v1/expansion_generated.go | 19 ++ .../image/clientset/versioned/clientset.go | 105 ++++++++++ .../image/clientset/versioned/doc.go | 4 + .../versioned/fake/clientset_generated.go | 69 ++++++ .../image/clientset/versioned/fake/doc.go | 4 + .../clientset/versioned/fake/register.go | 40 ++++ .../versioned/typed/image/v1/fake/doc.go | 4 + .../typed/image/v1/fake/fake_image.go | 130 ++++++++++++ .../typed/image/v1/fake/fake_image_client.go | 52 +++++ .../image/v1/fake/fake_imagesignature.go | 38 ++++ .../typed/image/v1/fake/fake_imagestream.go | 196 ++++++++++++++++++ .../image/v1/fake/fake_imagestreamimage.go | 33 +++ .../image/v1/fake/fake_imagestreamimport.go | 33 +++ .../image/v1/fake/fake_imagestreammapping.go | 59 ++++++ .../image/v1/fake/fake_imagestreamtag.go | 86 ++++++++ .../typed/image/v1/fake/fake_imagetag.go | 86 ++++++++ .../informers/externalversions/factory.go | 164 +++++++++++++++ .../informers/externalversions/generic.go | 48 +++++ .../externalversions/image/interface.go | 30 +++ .../externalversions/image/v1/image.go | 73 +++++++ .../externalversions/image/v1/imagestream.go | 74 +++++++ .../externalversions/image/v1/interface.go | 36 ++++ .../internalinterfaces/factory_interfaces.go | 24 +++ .../listers/image/v1/expansion_generated.go | 31 +++ .../client-go/image/listers/image/v1/image.go | 52 +++++ .../image/listers/image/v1/imagestream.go | 83 ++++++++ .../image/listers/image/v1/imagestreamtag.go | 83 ++++++++ .../image/listers/image/v1/imagetag.go | 83 ++++++++ vendor/modules.txt | 16 ++ 47 files changed, 3006 insertions(+) create mode 100644 vendor/github.com/openshift/client-go/build/clientset/versioned/clientset.go create mode 100644 vendor/github.com/openshift/client-go/build/clientset/versioned/doc.go create mode 100644 vendor/github.com/openshift/client-go/build/clientset/versioned/fake/clientset_generated.go 
create mode 100644 vendor/github.com/openshift/client-go/build/clientset/versioned/fake/doc.go create mode 100644 vendor/github.com/openshift/client-go/build/clientset/versioned/fake/register.go create mode 100644 vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/doc.go create mode 100644 vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_build.go create mode 100644 vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_build_client.go create mode 100644 vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_buildconfig.go create mode 100644 vendor/github.com/openshift/client-go/build/informers/externalversions/build/interface.go create mode 100644 vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/build.go create mode 100644 vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/buildconfig.go create mode 100644 vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/interface.go create mode 100644 vendor/github.com/openshift/client-go/build/informers/externalversions/factory.go create mode 100644 vendor/github.com/openshift/client-go/build/informers/externalversions/generic.go create mode 100644 vendor/github.com/openshift/client-go/build/informers/externalversions/internalinterfaces/factory_interfaces.go create mode 100644 vendor/github.com/openshift/client-go/build/listers/build/v1/build.go create mode 100644 vendor/github.com/openshift/client-go/build/listers/build/v1/buildconfig.go create mode 100644 vendor/github.com/openshift/client-go/build/listers/build/v1/expansion_generated.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/clientset.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/doc.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/fake/clientset_generated.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/fake/doc.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/fake/register.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/doc.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_image.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_image_client.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagesignature.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestream.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamimage.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamimport.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreammapping.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamtag.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagetag.go create mode 100644 
vendor/github.com/openshift/client-go/image/informers/externalversions/factory.go create mode 100644 vendor/github.com/openshift/client-go/image/informers/externalversions/generic.go create mode 100644 vendor/github.com/openshift/client-go/image/informers/externalversions/image/interface.go create mode 100644 vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/image.go create mode 100644 vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/imagestream.go create mode 100644 vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/interface.go create mode 100644 vendor/github.com/openshift/client-go/image/informers/externalversions/internalinterfaces/factory_interfaces.go create mode 100644 vendor/github.com/openshift/client-go/image/listers/image/v1/expansion_generated.go create mode 100644 vendor/github.com/openshift/client-go/image/listers/image/v1/image.go create mode 100644 vendor/github.com/openshift/client-go/image/listers/image/v1/imagestream.go create mode 100644 vendor/github.com/openshift/client-go/image/listers/image/v1/imagestreamtag.go create mode 100644 vendor/github.com/openshift/client-go/image/listers/image/v1/imagetag.go diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/clientset.go new file mode 100644 index 0000000000..d7c9c69806 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/clientset.go @@ -0,0 +1,105 @@ +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + "fmt" + "net/http" + + buildv1 "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + BuildV1() buildv1.BuildV1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + buildV1 *buildv1.BuildV1Client +} + +// BuildV1 retrieves the BuildV1Client +func (c *Clientset) BuildV1() buildv1.BuildV1Interface { + return c.buildV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.buildV1, err = buildv1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.buildV1 = buildv1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/doc.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/doc.go new file mode 100644 index 0000000000..0e0c2a8900 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package versioned diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 0000000000..7070b0ed07 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,69 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + clientset "github.com/openshift/client-go/build/clientset/versioned" + buildv1 "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1" + fakebuildv1 "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. 
+func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +var ( + _ clientset.Interface = &Clientset{} + _ testing.FakeClient = &Clientset{} +) + +// BuildV1 retrieves the BuildV1Client +func (c *Clientset) BuildV1() buildv1.BuildV1Interface { + return &fakebuildv1.FakeBuildV1{Fake: &c.Fake} +} diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/fake/doc.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/fake/doc.go new file mode 100644 index 0000000000..3630ed1cd1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/fake/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/fake/register.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/fake/register.go new file mode 100644 index 0000000000..c120bd193a --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/fake/register.go @@ -0,0 +1,40 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + buildv1 "github.com/openshift/api/build/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) + +var localSchemeBuilder = runtime.SchemeBuilder{ + buildv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. 
+var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/doc.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/doc.go new file mode 100644 index 0000000000..2b5ba4c8e4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_build.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_build.go new file mode 100644 index 0000000000..ea364d7eff --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_build.go @@ -0,0 +1,196 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + buildv1 "github.com/openshift/api/build/v1" + applyconfigurationsbuildv1 "github.com/openshift/client-go/build/applyconfigurations/build/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeBuilds implements BuildInterface +type FakeBuilds struct { + Fake *FakeBuildV1 + ns string +} + +var buildsResource = schema.GroupVersionResource{Group: "build.openshift.io", Version: "v1", Resource: "builds"} + +var buildsKind = schema.GroupVersionKind{Group: "build.openshift.io", Version: "v1", Kind: "Build"} + +// Get takes name of the build, and returns the corresponding build object, and an error if there is any. +func (c *FakeBuilds) Get(ctx context.Context, name string, options v1.GetOptions) (result *buildv1.Build, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(buildsResource, c.ns, name), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} + +// List takes label and field selectors, and returns the list of Builds that match those selectors. +func (c *FakeBuilds) List(ctx context.Context, opts v1.ListOptions) (result *buildv1.BuildList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(buildsResource, buildsKind, c.ns, opts), &buildv1.BuildList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &buildv1.BuildList{ListMeta: obj.(*buildv1.BuildList).ListMeta} + for _, item := range obj.(*buildv1.BuildList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested builds. +func (c *FakeBuilds) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(buildsResource, c.ns, opts)) + +} + +// Create takes the representation of a build and creates it. Returns the server's representation of the build, and an error, if there is any. 
+func (c *FakeBuilds) Create(ctx context.Context, build *buildv1.Build, opts v1.CreateOptions) (result *buildv1.Build, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(buildsResource, c.ns, build), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} + +// Update takes the representation of a build and updates it. Returns the server's representation of the build, and an error, if there is any. +func (c *FakeBuilds) Update(ctx context.Context, build *buildv1.Build, opts v1.UpdateOptions) (result *buildv1.Build, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(buildsResource, c.ns, build), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeBuilds) UpdateStatus(ctx context.Context, build *buildv1.Build, opts v1.UpdateOptions) (*buildv1.Build, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(buildsResource, "status", c.ns, build), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} + +// Delete takes name of the build and deletes it. Returns an error if one occurs. +func (c *FakeBuilds) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(buildsResource, c.ns, name, opts), &buildv1.Build{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeBuilds) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(buildsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &buildv1.BuildList{}) + return err +} + +// Patch applies the patch and returns the patched build. +func (c *FakeBuilds) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *buildv1.Build, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(buildsResource, c.ns, name, pt, data, subresources...), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied build. +func (c *FakeBuilds) Apply(ctx context.Context, build *applyconfigurationsbuildv1.BuildApplyConfiguration, opts v1.ApplyOptions) (result *buildv1.Build, err error) { + if build == nil { + return nil, fmt.Errorf("build provided to Apply must not be nil") + } + data, err := json.Marshal(build) + if err != nil { + return nil, err + } + name := build.Name + if name == nil { + return nil, fmt.Errorf("build.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(buildsResource, c.ns, *name, types.ApplyPatchType, data), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+func (c *FakeBuilds) ApplyStatus(ctx context.Context, build *applyconfigurationsbuildv1.BuildApplyConfiguration, opts v1.ApplyOptions) (result *buildv1.Build, err error) { + if build == nil { + return nil, fmt.Errorf("build provided to Apply must not be nil") + } + data, err := json.Marshal(build) + if err != nil { + return nil, err + } + name := build.Name + if name == nil { + return nil, fmt.Errorf("build.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(buildsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} + +// UpdateDetails takes the representation of a build and updates it. Returns the server's representation of the build, and an error, if there is any. +func (c *FakeBuilds) UpdateDetails(ctx context.Context, buildName string, build *buildv1.Build, opts v1.UpdateOptions) (result *buildv1.Build, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(buildsResource, "details", c.ns, build), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} + +// Clone takes the representation of a buildRequest and creates it. Returns the server's representation of the build, and an error, if there is any. +func (c *FakeBuilds) Clone(ctx context.Context, buildName string, buildRequest *buildv1.BuildRequest, opts v1.CreateOptions) (result *buildv1.Build, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateSubresourceAction(buildsResource, buildName, "clone", c.ns, buildRequest), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_build_client.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_build_client.go new file mode 100644 index 0000000000..31cdb947c4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_build_client.go @@ -0,0 +1,28 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeBuildV1 struct { + *testing.Fake +} + +func (c *FakeBuildV1) Builds(namespace string) v1.BuildInterface { + return &FakeBuilds{c, namespace} +} + +func (c *FakeBuildV1) BuildConfigs(namespace string) v1.BuildConfigInterface { + return &FakeBuildConfigs{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeBuildV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_buildconfig.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_buildconfig.go new file mode 100644 index 0000000000..44e79e3773 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_buildconfig.go @@ -0,0 +1,185 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + buildv1 "github.com/openshift/api/build/v1" + applyconfigurationsbuildv1 "github.com/openshift/client-go/build/applyconfigurations/build/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeBuildConfigs implements BuildConfigInterface +type FakeBuildConfigs struct { + Fake *FakeBuildV1 + ns string +} + +var buildconfigsResource = schema.GroupVersionResource{Group: "build.openshift.io", Version: "v1", Resource: "buildconfigs"} + +var buildconfigsKind = schema.GroupVersionKind{Group: "build.openshift.io", Version: "v1", Kind: "BuildConfig"} + +// Get takes name of the buildConfig, and returns the corresponding buildConfig object, and an error if there is any. +func (c *FakeBuildConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *buildv1.BuildConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(buildconfigsResource, c.ns, name), &buildv1.BuildConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.BuildConfig), err +} + +// List takes label and field selectors, and returns the list of BuildConfigs that match those selectors. +func (c *FakeBuildConfigs) List(ctx context.Context, opts v1.ListOptions) (result *buildv1.BuildConfigList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(buildconfigsResource, buildconfigsKind, c.ns, opts), &buildv1.BuildConfigList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &buildv1.BuildConfigList{ListMeta: obj.(*buildv1.BuildConfigList).ListMeta} + for _, item := range obj.(*buildv1.BuildConfigList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested buildConfigs. +func (c *FakeBuildConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(buildconfigsResource, c.ns, opts)) + +} + +// Create takes the representation of a buildConfig and creates it. Returns the server's representation of the buildConfig, and an error, if there is any. +func (c *FakeBuildConfigs) Create(ctx context.Context, buildConfig *buildv1.BuildConfig, opts v1.CreateOptions) (result *buildv1.BuildConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(buildconfigsResource, c.ns, buildConfig), &buildv1.BuildConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.BuildConfig), err +} + +// Update takes the representation of a buildConfig and updates it. Returns the server's representation of the buildConfig, and an error, if there is any. +func (c *FakeBuildConfigs) Update(ctx context.Context, buildConfig *buildv1.BuildConfig, opts v1.UpdateOptions) (result *buildv1.BuildConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(buildconfigsResource, c.ns, buildConfig), &buildv1.BuildConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.BuildConfig), err +} + +// UpdateStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeBuildConfigs) UpdateStatus(ctx context.Context, buildConfig *buildv1.BuildConfig, opts v1.UpdateOptions) (*buildv1.BuildConfig, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(buildconfigsResource, "status", c.ns, buildConfig), &buildv1.BuildConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.BuildConfig), err +} + +// Delete takes name of the buildConfig and deletes it. Returns an error if one occurs. +func (c *FakeBuildConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(buildconfigsResource, c.ns, name, opts), &buildv1.BuildConfig{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeBuildConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(buildconfigsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &buildv1.BuildConfigList{}) + return err +} + +// Patch applies the patch and returns the patched buildConfig. +func (c *FakeBuildConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *buildv1.BuildConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(buildconfigsResource, c.ns, name, pt, data, subresources...), &buildv1.BuildConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.BuildConfig), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied buildConfig. +func (c *FakeBuildConfigs) Apply(ctx context.Context, buildConfig *applyconfigurationsbuildv1.BuildConfigApplyConfiguration, opts v1.ApplyOptions) (result *buildv1.BuildConfig, err error) { + if buildConfig == nil { + return nil, fmt.Errorf("buildConfig provided to Apply must not be nil") + } + data, err := json.Marshal(buildConfig) + if err != nil { + return nil, err + } + name := buildConfig.Name + if name == nil { + return nil, fmt.Errorf("buildConfig.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(buildconfigsResource, c.ns, *name, types.ApplyPatchType, data), &buildv1.BuildConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.BuildConfig), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeBuildConfigs) ApplyStatus(ctx context.Context, buildConfig *applyconfigurationsbuildv1.BuildConfigApplyConfiguration, opts v1.ApplyOptions) (result *buildv1.BuildConfig, err error) { + if buildConfig == nil { + return nil, fmt.Errorf("buildConfig provided to Apply must not be nil") + } + data, err := json.Marshal(buildConfig) + if err != nil { + return nil, err + } + name := buildConfig.Name + if name == nil { + return nil, fmt.Errorf("buildConfig.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(buildconfigsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &buildv1.BuildConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.BuildConfig), err +} + +// Instantiate takes the representation of a buildRequest and creates it. 
Returns the server's representation of the build, and an error, if there is any. +func (c *FakeBuildConfigs) Instantiate(ctx context.Context, buildConfigName string, buildRequest *buildv1.BuildRequest, opts v1.CreateOptions) (result *buildv1.Build, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateSubresourceAction(buildconfigsResource, buildConfigName, "instantiate", c.ns, buildRequest), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/build/interface.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/interface.go new file mode 100644 index 0000000000..01a651928a --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/interface.go @@ -0,0 +1,30 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package build + +import ( + v1 "github.com/openshift/client-go/build/informers/externalversions/build/v1" + internalinterfaces "github.com/openshift/client-go/build/informers/externalversions/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/build.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/build.go new file mode 100644 index 0000000000..2055ed96f5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/build.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + buildv1 "github.com/openshift/api/build/v1" + versioned "github.com/openshift/client-go/build/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/build/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/build/listers/build/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// BuildInformer provides access to a shared informer and lister for +// Builds. +type BuildInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.BuildLister +} + +type buildInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewBuildInformer constructs a new informer for Build type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewBuildInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredBuildInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredBuildInformer constructs a new informer for Build type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredBuildInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BuildV1().Builds(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BuildV1().Builds(namespace).Watch(context.TODO(), options) + }, + }, + &buildv1.Build{}, + resyncPeriod, + indexers, + ) +} + +func (f *buildInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredBuildInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *buildInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&buildv1.Build{}, f.defaultInformer) +} + +func (f *buildInformer) Lister() v1.BuildLister { + return v1.NewBuildLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/buildconfig.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/buildconfig.go new file mode 100644 index 0000000000..28012f8c6d --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/buildconfig.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + buildv1 "github.com/openshift/api/build/v1" + versioned "github.com/openshift/client-go/build/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/build/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/build/listers/build/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// BuildConfigInformer provides access to a shared informer and lister for +// BuildConfigs. +type BuildConfigInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.BuildConfigLister +} + +type buildConfigInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewBuildConfigInformer constructs a new informer for BuildConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewBuildConfigInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredBuildConfigInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredBuildConfigInformer constructs a new informer for BuildConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredBuildConfigInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BuildV1().BuildConfigs(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BuildV1().BuildConfigs(namespace).Watch(context.TODO(), options) + }, + }, + &buildv1.BuildConfig{}, + resyncPeriod, + indexers, + ) +} + +func (f *buildConfigInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredBuildConfigInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *buildConfigInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&buildv1.BuildConfig{}, f.defaultInformer) +} + +func (f *buildConfigInformer) Lister() v1.BuildConfigLister { + return v1.NewBuildConfigLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/interface.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/interface.go new file mode 100644 index 0000000000..da69fc9bb6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/interface.go @@ -0,0 +1,36 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "github.com/openshift/client-go/build/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Builds returns a BuildInformer. + Builds() BuildInformer + // BuildConfigs returns a BuildConfigInformer. + BuildConfigs() BuildConfigInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Builds returns a BuildInformer. +func (v *version) Builds() BuildInformer { + return &buildInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// BuildConfigs returns a BuildConfigInformer. 
+func (v *version) BuildConfigs() BuildConfigInformer { + return &buildConfigInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/factory.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/factory.go new file mode 100644 index 0000000000..fadac908e0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/factory.go @@ -0,0 +1,164 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/openshift/client-go/build/clientset/versioned" + build "github.com/openshift/client-go/build/informers/externalversions/build" + internalinterfaces "github.com/openshift/client-go/build/informers/externalversions/internalinterfaces" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. 
+// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +// Start initializes all requested informers. +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. +func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. +func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. 
+type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + Build() build.Interface +} + +func (f *sharedInformerFactory) Build() build.Interface { + return build.New(f, f.namespace, f.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/generic.go new file mode 100644 index 0000000000..e8b2035b70 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/generic.go @@ -0,0 +1,48 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + "fmt" + + v1 "github.com/openshift/api/build/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. +func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=build.openshift.io, Version=v1 + case v1.SchemeGroupVersion.WithResource("builds"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Build().V1().Builds().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("buildconfigs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Build().V1().BuildConfigs().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 0000000000..1bcbd5975a --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,24 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + versioned "github.com/openshift/client-go/build/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. 
+type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/github.com/openshift/client-go/build/listers/build/v1/build.go b/vendor/github.com/openshift/client-go/build/listers/build/v1/build.go new file mode 100644 index 0000000000..e072f9bac8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/listers/build/v1/build.go @@ -0,0 +1,83 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/build/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// BuildLister helps list Builds. +// All objects returned here must be treated as read-only. +type BuildLister interface { + // List lists all Builds in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Build, err error) + // Builds returns an object that can list and get Builds. + Builds(namespace string) BuildNamespaceLister + BuildListerExpansion +} + +// buildLister implements the BuildLister interface. +type buildLister struct { + indexer cache.Indexer +} + +// NewBuildLister returns a new BuildLister. +func NewBuildLister(indexer cache.Indexer) BuildLister { + return &buildLister{indexer: indexer} +} + +// List lists all Builds in the indexer. +func (s *buildLister) List(selector labels.Selector) (ret []*v1.Build, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Build)) + }) + return ret, err +} + +// Builds returns an object that can list and get Builds. +func (s *buildLister) Builds(namespace string) BuildNamespaceLister { + return buildNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// BuildNamespaceLister helps list and get Builds. +// All objects returned here must be treated as read-only. +type BuildNamespaceLister interface { + // List lists all Builds in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Build, err error) + // Get retrieves the Build from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.Build, error) + BuildNamespaceListerExpansion +} + +// buildNamespaceLister implements the BuildNamespaceLister +// interface. +type buildNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Builds in the indexer for a given namespace. +func (s buildNamespaceLister) List(selector labels.Selector) (ret []*v1.Build, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Build)) + }) + return ret, err +} + +// Get retrieves the Build from the indexer for a given namespace and name. 
+func (s buildNamespaceLister) Get(name string) (*v1.Build, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("build"), name) + } + return obj.(*v1.Build), nil +} diff --git a/vendor/github.com/openshift/client-go/build/listers/build/v1/buildconfig.go b/vendor/github.com/openshift/client-go/build/listers/build/v1/buildconfig.go new file mode 100644 index 0000000000..d2bbdb4ec6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/listers/build/v1/buildconfig.go @@ -0,0 +1,83 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/build/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// BuildConfigLister helps list BuildConfigs. +// All objects returned here must be treated as read-only. +type BuildConfigLister interface { + // List lists all BuildConfigs in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.BuildConfig, err error) + // BuildConfigs returns an object that can list and get BuildConfigs. + BuildConfigs(namespace string) BuildConfigNamespaceLister + BuildConfigListerExpansion +} + +// buildConfigLister implements the BuildConfigLister interface. +type buildConfigLister struct { + indexer cache.Indexer +} + +// NewBuildConfigLister returns a new BuildConfigLister. +func NewBuildConfigLister(indexer cache.Indexer) BuildConfigLister { + return &buildConfigLister{indexer: indexer} +} + +// List lists all BuildConfigs in the indexer. +func (s *buildConfigLister) List(selector labels.Selector) (ret []*v1.BuildConfig, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.BuildConfig)) + }) + return ret, err +} + +// BuildConfigs returns an object that can list and get BuildConfigs. +func (s *buildConfigLister) BuildConfigs(namespace string) BuildConfigNamespaceLister { + return buildConfigNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// BuildConfigNamespaceLister helps list and get BuildConfigs. +// All objects returned here must be treated as read-only. +type BuildConfigNamespaceLister interface { + // List lists all BuildConfigs in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.BuildConfig, err error) + // Get retrieves the BuildConfig from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.BuildConfig, error) + BuildConfigNamespaceListerExpansion +} + +// buildConfigNamespaceLister implements the BuildConfigNamespaceLister +// interface. +type buildConfigNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all BuildConfigs in the indexer for a given namespace. +func (s buildConfigNamespaceLister) List(selector labels.Selector) (ret []*v1.BuildConfig, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.BuildConfig)) + }) + return ret, err +} + +// Get retrieves the BuildConfig from the indexer for a given namespace and name. 
+func (s buildConfigNamespaceLister) Get(name string) (*v1.BuildConfig, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("buildconfig"), name) + } + return obj.(*v1.BuildConfig), nil +} diff --git a/vendor/github.com/openshift/client-go/build/listers/build/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/build/listers/build/v1/expansion_generated.go new file mode 100644 index 0000000000..1fc9faecdd --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/listers/build/v1/expansion_generated.go @@ -0,0 +1,19 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// BuildListerExpansion allows custom methods to be added to +// BuildLister. +type BuildListerExpansion interface{} + +// BuildNamespaceListerExpansion allows custom methods to be added to +// BuildNamespaceLister. +type BuildNamespaceListerExpansion interface{} + +// BuildConfigListerExpansion allows custom methods to be added to +// BuildConfigLister. +type BuildConfigListerExpansion interface{} + +// BuildConfigNamespaceListerExpansion allows custom methods to be added to +// BuildConfigNamespaceLister. +type BuildConfigNamespaceListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/clientset.go new file mode 100644 index 0000000000..b0ebcebf51 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/clientset.go @@ -0,0 +1,105 @@ +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + "fmt" + "net/http" + + imagev1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + ImageV1() imagev1.ImageV1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + imageV1 *imagev1.ImageV1Client +} + +// ImageV1 retrieves the ImageV1Client +func (c *Clientset) ImageV1() imagev1.ImageV1Interface { + return c.imageV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.imageV1, err = imagev1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.imageV1 = imagev1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/doc.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/doc.go new file mode 100644 index 0000000000..0e0c2a8900 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package versioned diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 0000000000..dfb57b4e10 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,69 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + clientset "github.com/openshift/client-go/image/clientset/versioned" + imagev1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" + fakeimagev1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. 
+func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +var ( + _ clientset.Interface = &Clientset{} + _ testing.FakeClient = &Clientset{} +) + +// ImageV1 retrieves the ImageV1Client +func (c *Clientset) ImageV1() imagev1.ImageV1Interface { + return &fakeimagev1.FakeImageV1{Fake: &c.Fake} +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/doc.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/doc.go new file mode 100644 index 0000000000..3630ed1cd1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/register.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/register.go new file mode 100644 index 0000000000..d7efdf27ee --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/register.go @@ -0,0 +1,40 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + imagev1 "github.com/openshift/api/image/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) + +var localSchemeBuilder = runtime.SchemeBuilder{ + imagev1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. 
+var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/doc.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/doc.go new file mode 100644 index 0000000000..2b5ba4c8e4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_image.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_image.go new file mode 100644 index 0000000000..c32387af93 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_image.go @@ -0,0 +1,130 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + imagev1 "github.com/openshift/api/image/v1" + applyconfigurationsimagev1 "github.com/openshift/client-go/image/applyconfigurations/image/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeImages implements ImageInterface +type FakeImages struct { + Fake *FakeImageV1 +} + +var imagesResource = schema.GroupVersionResource{Group: "image.openshift.io", Version: "v1", Resource: "images"} + +var imagesKind = schema.GroupVersionKind{Group: "image.openshift.io", Version: "v1", Kind: "Image"} + +// Get takes name of the image, and returns the corresponding image object, and an error if there is any. +func (c *FakeImages) Get(ctx context.Context, name string, options v1.GetOptions) (result *imagev1.Image, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(imagesResource, name), &imagev1.Image{}) + if obj == nil { + return nil, err + } + return obj.(*imagev1.Image), err +} + +// List takes label and field selectors, and returns the list of Images that match those selectors. +func (c *FakeImages) List(ctx context.Context, opts v1.ListOptions) (result *imagev1.ImageList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(imagesResource, imagesKind, opts), &imagev1.ImageList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &imagev1.ImageList{ListMeta: obj.(*imagev1.ImageList).ListMeta} + for _, item := range obj.(*imagev1.ImageList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested images. +func (c *FakeImages) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(imagesResource, opts)) +} + +// Create takes the representation of a image and creates it. Returns the server's representation of the image, and an error, if there is any. 
+func (c *FakeImages) Create(ctx context.Context, image *imagev1.Image, opts v1.CreateOptions) (result *imagev1.Image, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(imagesResource, image), &imagev1.Image{}) + if obj == nil { + return nil, err + } + return obj.(*imagev1.Image), err +} + +// Update takes the representation of a image and updates it. Returns the server's representation of the image, and an error, if there is any. +func (c *FakeImages) Update(ctx context.Context, image *imagev1.Image, opts v1.UpdateOptions) (result *imagev1.Image, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(imagesResource, image), &imagev1.Image{}) + if obj == nil { + return nil, err + } + return obj.(*imagev1.Image), err +} + +// Delete takes name of the image and deletes it. Returns an error if one occurs. +func (c *FakeImages) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(imagesResource, name, opts), &imagev1.Image{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeImages) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(imagesResource, listOpts) + + _, err := c.Fake.Invokes(action, &imagev1.ImageList{}) + return err +} + +// Patch applies the patch and returns the patched image. +func (c *FakeImages) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *imagev1.Image, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(imagesResource, name, pt, data, subresources...), &imagev1.Image{}) + if obj == nil { + return nil, err + } + return obj.(*imagev1.Image), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied image. +func (c *FakeImages) Apply(ctx context.Context, image *applyconfigurationsimagev1.ImageApplyConfiguration, opts v1.ApplyOptions) (result *imagev1.Image, err error) { + if image == nil { + return nil, fmt.Errorf("image provided to Apply must not be nil") + } + data, err := json.Marshal(image) + if err != nil { + return nil, err + } + name := image.Name + if name == nil { + return nil, fmt.Errorf("image.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(imagesResource, *name, types.ApplyPatchType, data), &imagev1.Image{}) + if obj == nil { + return nil, err + } + return obj.(*imagev1.Image), err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_image_client.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_image_client.go new file mode 100644 index 0000000000..c135a79bb6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_image_client.go @@ -0,0 +1,52 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeImageV1 struct { + *testing.Fake +} + +func (c *FakeImageV1) Images() v1.ImageInterface { + return &FakeImages{c} +} + +func (c *FakeImageV1) ImageSignatures() v1.ImageSignatureInterface { + return &FakeImageSignatures{c} +} + +func (c *FakeImageV1) ImageStreams(namespace string) v1.ImageStreamInterface { + return &FakeImageStreams{c, namespace} +} + +func (c *FakeImageV1) ImageStreamImages(namespace string) v1.ImageStreamImageInterface { + return &FakeImageStreamImages{c, namespace} +} + +func (c *FakeImageV1) ImageStreamImports(namespace string) v1.ImageStreamImportInterface { + return &FakeImageStreamImports{c, namespace} +} + +func (c *FakeImageV1) ImageStreamMappings(namespace string) v1.ImageStreamMappingInterface { + return &FakeImageStreamMappings{c, namespace} +} + +func (c *FakeImageV1) ImageStreamTags(namespace string) v1.ImageStreamTagInterface { + return &FakeImageStreamTags{c, namespace} +} + +func (c *FakeImageV1) ImageTags(namespace string) v1.ImageTagInterface { + return &FakeImageTags{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeImageV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagesignature.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagesignature.go new file mode 100644 index 0000000000..0ff22e2fd2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagesignature.go @@ -0,0 +1,38 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "github.com/openshift/api/image/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + +// FakeImageSignatures implements ImageSignatureInterface +type FakeImageSignatures struct { + Fake *FakeImageV1 +} + +var imagesignaturesResource = schema.GroupVersionResource{Group: "image.openshift.io", Version: "v1", Resource: "imagesignatures"} + +var imagesignaturesKind = schema.GroupVersionKind{Group: "image.openshift.io", Version: "v1", Kind: "ImageSignature"} + +// Create takes the representation of a imageSignature and creates it. Returns the server's representation of the imageSignature, and an error, if there is any. +func (c *FakeImageSignatures) Create(ctx context.Context, imageSignature *v1.ImageSignature, opts metav1.CreateOptions) (result *v1.ImageSignature, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(imagesignaturesResource, imageSignature), &v1.ImageSignature{}) + if obj == nil { + return nil, err + } + return obj.(*v1.ImageSignature), err +} + +// Delete takes name of the imageSignature and deletes it. Returns an error if one occurs. +func (c *FakeImageSignatures) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewRootDeleteActionWithOptions(imagesignaturesResource, name, opts), &v1.ImageSignature{}) + return err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestream.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestream.go new file mode 100644 index 0000000000..7db6c8a822 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestream.go @@ -0,0 +1,196 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + imagev1 "github.com/openshift/api/image/v1" + applyconfigurationsimagev1 "github.com/openshift/client-go/image/applyconfigurations/image/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeImageStreams implements ImageStreamInterface +type FakeImageStreams struct { + Fake *FakeImageV1 + ns string +} + +var imagestreamsResource = schema.GroupVersionResource{Group: "image.openshift.io", Version: "v1", Resource: "imagestreams"} + +var imagestreamsKind = schema.GroupVersionKind{Group: "image.openshift.io", Version: "v1", Kind: "ImageStream"} + +// Get takes name of the imageStream, and returns the corresponding imageStream object, and an error if there is any. +func (c *FakeImageStreams) Get(ctx context.Context, name string, options v1.GetOptions) (result *imagev1.ImageStream, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(imagestreamsResource, c.ns, name), &imagev1.ImageStream{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStream), err +} + +// List takes label and field selectors, and returns the list of ImageStreams that match those selectors. +func (c *FakeImageStreams) List(ctx context.Context, opts v1.ListOptions) (result *imagev1.ImageStreamList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(imagestreamsResource, imagestreamsKind, c.ns, opts), &imagev1.ImageStreamList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &imagev1.ImageStreamList{ListMeta: obj.(*imagev1.ImageStreamList).ListMeta} + for _, item := range obj.(*imagev1.ImageStreamList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested imageStreams. +func (c *FakeImageStreams) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(imagestreamsResource, c.ns, opts)) + +} + +// Create takes the representation of a imageStream and creates it. Returns the server's representation of the imageStream, and an error, if there is any. +func (c *FakeImageStreams) Create(ctx context.Context, imageStream *imagev1.ImageStream, opts v1.CreateOptions) (result *imagev1.ImageStream, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(imagestreamsResource, c.ns, imageStream), &imagev1.ImageStream{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStream), err +} + +// Update takes the representation of a imageStream and updates it. 
Returns the server's representation of the imageStream, and an error, if there is any. +func (c *FakeImageStreams) Update(ctx context.Context, imageStream *imagev1.ImageStream, opts v1.UpdateOptions) (result *imagev1.ImageStream, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(imagestreamsResource, c.ns, imageStream), &imagev1.ImageStream{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStream), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeImageStreams) UpdateStatus(ctx context.Context, imageStream *imagev1.ImageStream, opts v1.UpdateOptions) (*imagev1.ImageStream, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(imagestreamsResource, "status", c.ns, imageStream), &imagev1.ImageStream{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStream), err +} + +// Delete takes name of the imageStream and deletes it. Returns an error if one occurs. +func (c *FakeImageStreams) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(imagestreamsResource, c.ns, name, opts), &imagev1.ImageStream{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeImageStreams) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(imagestreamsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &imagev1.ImageStreamList{}) + return err +} + +// Patch applies the patch and returns the patched imageStream. +func (c *FakeImageStreams) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *imagev1.ImageStream, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(imagestreamsResource, c.ns, name, pt, data, subresources...), &imagev1.ImageStream{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStream), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied imageStream. +func (c *FakeImageStreams) Apply(ctx context.Context, imageStream *applyconfigurationsimagev1.ImageStreamApplyConfiguration, opts v1.ApplyOptions) (result *imagev1.ImageStream, err error) { + if imageStream == nil { + return nil, fmt.Errorf("imageStream provided to Apply must not be nil") + } + data, err := json.Marshal(imageStream) + if err != nil { + return nil, err + } + name := imageStream.Name + if name == nil { + return nil, fmt.Errorf("imageStream.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(imagestreamsResource, c.ns, *name, types.ApplyPatchType, data), &imagev1.ImageStream{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStream), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+func (c *FakeImageStreams) ApplyStatus(ctx context.Context, imageStream *applyconfigurationsimagev1.ImageStreamApplyConfiguration, opts v1.ApplyOptions) (result *imagev1.ImageStream, err error) { + if imageStream == nil { + return nil, fmt.Errorf("imageStream provided to Apply must not be nil") + } + data, err := json.Marshal(imageStream) + if err != nil { + return nil, err + } + name := imageStream.Name + if name == nil { + return nil, fmt.Errorf("imageStream.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(imagestreamsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &imagev1.ImageStream{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStream), err +} + +// Secrets takes name of the imageStream, and returns the corresponding secretList object, and an error if there is any. +func (c *FakeImageStreams) Secrets(ctx context.Context, imageStreamName string, options v1.GetOptions) (result *imagev1.SecretList, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetSubresourceAction(imagestreamsResource, c.ns, "secrets", imageStreamName), &imagev1.SecretList{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.SecretList), err +} + +// Layers takes name of the imageStream, and returns the corresponding imageStreamLayers object, and an error if there is any. +func (c *FakeImageStreams) Layers(ctx context.Context, imageStreamName string, options v1.GetOptions) (result *imagev1.ImageStreamLayers, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetSubresourceAction(imagestreamsResource, c.ns, "layers", imageStreamName), &imagev1.ImageStreamLayers{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStreamLayers), err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamimage.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamimage.go new file mode 100644 index 0000000000..aa97914259 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamimage.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + imagev1 "github.com/openshift/api/image/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + +// FakeImageStreamImages implements ImageStreamImageInterface +type FakeImageStreamImages struct { + Fake *FakeImageV1 + ns string +} + +var imagestreamimagesResource = schema.GroupVersionResource{Group: "image.openshift.io", Version: "v1", Resource: "imagestreamimages"} + +var imagestreamimagesKind = schema.GroupVersionKind{Group: "image.openshift.io", Version: "v1", Kind: "ImageStreamImage"} + +// Get takes name of the imageStreamImage, and returns the corresponding imageStreamImage object, and an error if there is any. +func (c *FakeImageStreamImages) Get(ctx context.Context, name string, options v1.GetOptions) (result *imagev1.ImageStreamImage, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewGetAction(imagestreamimagesResource, c.ns, name), &imagev1.ImageStreamImage{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStreamImage), err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamimport.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamimport.go new file mode 100644 index 0000000000..5e7e5da326 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamimport.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "github.com/openshift/api/image/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + +// FakeImageStreamImports implements ImageStreamImportInterface +type FakeImageStreamImports struct { + Fake *FakeImageV1 + ns string +} + +var imagestreamimportsResource = schema.GroupVersionResource{Group: "image.openshift.io", Version: "v1", Resource: "imagestreamimports"} + +var imagestreamimportsKind = schema.GroupVersionKind{Group: "image.openshift.io", Version: "v1", Kind: "ImageStreamImport"} + +// Create takes the representation of a imageStreamImport and creates it. Returns the server's representation of the imageStreamImport, and an error, if there is any. +func (c *FakeImageStreamImports) Create(ctx context.Context, imageStreamImport *v1.ImageStreamImport, opts metav1.CreateOptions) (result *v1.ImageStreamImport, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(imagestreamimportsResource, c.ns, imageStreamImport), &v1.ImageStreamImport{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ImageStreamImport), err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreammapping.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreammapping.go new file mode 100644 index 0000000000..d50ddbd1ed --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreammapping.go @@ -0,0 +1,59 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + imagev1 "github.com/openshift/api/image/v1" + v1 "github.com/openshift/client-go/image/applyconfigurations/image/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + testing "k8s.io/client-go/testing" +) + +// FakeImageStreamMappings implements ImageStreamMappingInterface +type FakeImageStreamMappings struct { + Fake *FakeImageV1 + ns string +} + +var imagestreammappingsResource = schema.GroupVersionResource{Group: "image.openshift.io", Version: "v1", Resource: "imagestreammappings"} + +var imagestreammappingsKind = schema.GroupVersionKind{Group: "image.openshift.io", Version: "v1", Kind: "ImageStreamMapping"} + +// Apply takes the given apply declarative configuration, applies it and returns the applied imageStreamMapping. 
+func (c *FakeImageStreamMappings) Apply(ctx context.Context, imageStreamMapping *v1.ImageStreamMappingApplyConfiguration, opts metav1.ApplyOptions) (result *imagev1.ImageStreamMapping, err error) { + if imageStreamMapping == nil { + return nil, fmt.Errorf("imageStreamMapping provided to Apply must not be nil") + } + data, err := json.Marshal(imageStreamMapping) + if err != nil { + return nil, err + } + name := imageStreamMapping.Name + if name == nil { + return nil, fmt.Errorf("imageStreamMapping.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(imagestreammappingsResource, c.ns, *name, types.ApplyPatchType, data), &imagev1.ImageStreamMapping{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStreamMapping), err +} + +// Create takes the representation of a imageStreamMapping and creates it. Returns the server's representation of the status, and an error, if there is any. +func (c *FakeImageStreamMappings) Create(ctx context.Context, imageStreamMapping *imagev1.ImageStreamMapping, opts metav1.CreateOptions) (result *metav1.Status, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(imagestreammappingsResource, c.ns, imageStreamMapping), &metav1.Status{}) + + if obj == nil { + return nil, err + } + return obj.(*metav1.Status), err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamtag.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamtag.go new file mode 100644 index 0000000000..0befdecac1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamtag.go @@ -0,0 +1,86 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + imagev1 "github.com/openshift/api/image/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + +// FakeImageStreamTags implements ImageStreamTagInterface +type FakeImageStreamTags struct { + Fake *FakeImageV1 + ns string +} + +var imagestreamtagsResource = schema.GroupVersionResource{Group: "image.openshift.io", Version: "v1", Resource: "imagestreamtags"} + +var imagestreamtagsKind = schema.GroupVersionKind{Group: "image.openshift.io", Version: "v1", Kind: "ImageStreamTag"} + +// Get takes name of the imageStreamTag, and returns the corresponding imageStreamTag object, and an error if there is any. +func (c *FakeImageStreamTags) Get(ctx context.Context, name string, options v1.GetOptions) (result *imagev1.ImageStreamTag, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(imagestreamtagsResource, c.ns, name), &imagev1.ImageStreamTag{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStreamTag), err +} + +// List takes label and field selectors, and returns the list of ImageStreamTags that match those selectors. +func (c *FakeImageStreamTags) List(ctx context.Context, opts v1.ListOptions) (result *imagev1.ImageStreamTagList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(imagestreamtagsResource, imagestreamtagsKind, c.ns, opts), &imagev1.ImageStreamTagList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &imagev1.ImageStreamTagList{ListMeta: obj.(*imagev1.ImageStreamTagList).ListMeta} + for _, item := range obj.(*imagev1.ImageStreamTagList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Create takes the representation of a imageStreamTag and creates it. Returns the server's representation of the imageStreamTag, and an error, if there is any. +func (c *FakeImageStreamTags) Create(ctx context.Context, imageStreamTag *imagev1.ImageStreamTag, opts v1.CreateOptions) (result *imagev1.ImageStreamTag, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(imagestreamtagsResource, c.ns, imageStreamTag), &imagev1.ImageStreamTag{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStreamTag), err +} + +// Update takes the representation of a imageStreamTag and updates it. Returns the server's representation of the imageStreamTag, and an error, if there is any. +func (c *FakeImageStreamTags) Update(ctx context.Context, imageStreamTag *imagev1.ImageStreamTag, opts v1.UpdateOptions) (result *imagev1.ImageStreamTag, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(imagestreamtagsResource, c.ns, imageStreamTag), &imagev1.ImageStreamTag{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStreamTag), err +} + +// Delete takes name of the imageStreamTag and deletes it. Returns an error if one occurs. +func (c *FakeImageStreamTags) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(imagestreamtagsResource, c.ns, name, opts), &imagev1.ImageStreamTag{}) + + return err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagetag.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagetag.go new file mode 100644 index 0000000000..6bf41d7d97 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagetag.go @@ -0,0 +1,86 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + imagev1 "github.com/openshift/api/image/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + +// FakeImageTags implements ImageTagInterface +type FakeImageTags struct { + Fake *FakeImageV1 + ns string +} + +var imagetagsResource = schema.GroupVersionResource{Group: "image.openshift.io", Version: "v1", Resource: "imagetags"} + +var imagetagsKind = schema.GroupVersionKind{Group: "image.openshift.io", Version: "v1", Kind: "ImageTag"} + +// Get takes name of the imageTag, and returns the corresponding imageTag object, and an error if there is any. +func (c *FakeImageTags) Get(ctx context.Context, name string, options v1.GetOptions) (result *imagev1.ImageTag, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewGetAction(imagetagsResource, c.ns, name), &imagev1.ImageTag{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageTag), err +} + +// List takes label and field selectors, and returns the list of ImageTags that match those selectors. +func (c *FakeImageTags) List(ctx context.Context, opts v1.ListOptions) (result *imagev1.ImageTagList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(imagetagsResource, imagetagsKind, c.ns, opts), &imagev1.ImageTagList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &imagev1.ImageTagList{ListMeta: obj.(*imagev1.ImageTagList).ListMeta} + for _, item := range obj.(*imagev1.ImageTagList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Create takes the representation of a imageTag and creates it. Returns the server's representation of the imageTag, and an error, if there is any. +func (c *FakeImageTags) Create(ctx context.Context, imageTag *imagev1.ImageTag, opts v1.CreateOptions) (result *imagev1.ImageTag, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(imagetagsResource, c.ns, imageTag), &imagev1.ImageTag{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageTag), err +} + +// Update takes the representation of a imageTag and updates it. Returns the server's representation of the imageTag, and an error, if there is any. +func (c *FakeImageTags) Update(ctx context.Context, imageTag *imagev1.ImageTag, opts v1.UpdateOptions) (result *imagev1.ImageTag, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(imagetagsResource, c.ns, imageTag), &imagev1.ImageTag{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageTag), err +} + +// Delete takes name of the imageTag and deletes it. Returns an error if one occurs. +func (c *FakeImageTags) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(imagetagsResource, c.ns, name, opts), &imagev1.ImageTag{}) + + return err +} diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/factory.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/factory.go new file mode 100644 index 0000000000..067795180f --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/factory.go @@ -0,0 +1,164 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/openshift/client-go/image/clientset/versioned" + image "github.com/openshift/client-go/image/informers/externalversions/image" + internalinterfaces "github.com/openshift/client-go/image/informers/externalversions/internalinterfaces" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. 
+type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +// Start initializes all requested informers. +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. 
+func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. +func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. +type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + Image() image.Interface +} + +func (f *sharedInformerFactory) Image() image.Interface { + return image.New(f, f.namespace, f.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/generic.go new file mode 100644 index 0000000000..55f59dedef --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/generic.go @@ -0,0 +1,48 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + "fmt" + + v1 "github.com/openshift/api/image/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. 
+func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=image.openshift.io, Version=v1 + case v1.SchemeGroupVersion.WithResource("images"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Image().V1().Images().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("imagestreams"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Image().V1().ImageStreams().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/image/interface.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/interface.go new file mode 100644 index 0000000000..092550ed3d --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/interface.go @@ -0,0 +1,30 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package image + +import ( + v1 "github.com/openshift/client-go/image/informers/externalversions/image/v1" + internalinterfaces "github.com/openshift/client-go/image/informers/externalversions/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/image.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/image.go new file mode 100644 index 0000000000..ee2d0a7067 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/image.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + imagev1 "github.com/openshift/api/image/v1" + versioned "github.com/openshift/client-go/image/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/image/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/image/listers/image/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ImageInformer provides access to a shared informer and lister for +// Images. 
+type ImageInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ImageLister +} + +type imageInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewImageInformer constructs a new informer for Image type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewImageInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredImageInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredImageInformer constructs a new informer for Image type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredImageInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ImageV1().Images().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ImageV1().Images().Watch(context.TODO(), options) + }, + }, + &imagev1.Image{}, + resyncPeriod, + indexers, + ) +} + +func (f *imageInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredImageInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *imageInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&imagev1.Image{}, f.defaultInformer) +} + +func (f *imageInformer) Lister() v1.ImageLister { + return v1.NewImageLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/imagestream.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/imagestream.go new file mode 100644 index 0000000000..4a94cc5c7d --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/imagestream.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + imagev1 "github.com/openshift/api/image/v1" + versioned "github.com/openshift/client-go/image/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/image/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/image/listers/image/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ImageStreamInformer provides access to a shared informer and lister for +// ImageStreams. 
+type ImageStreamInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ImageStreamLister +} + +type imageStreamInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewImageStreamInformer constructs a new informer for ImageStream type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewImageStreamInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredImageStreamInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredImageStreamInformer constructs a new informer for ImageStream type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredImageStreamInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ImageV1().ImageStreams(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ImageV1().ImageStreams(namespace).Watch(context.TODO(), options) + }, + }, + &imagev1.ImageStream{}, + resyncPeriod, + indexers, + ) +} + +func (f *imageStreamInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredImageStreamInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *imageStreamInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&imagev1.ImageStream{}, f.defaultInformer) +} + +func (f *imageStreamInformer) Lister() v1.ImageStreamLister { + return v1.NewImageStreamLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/interface.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/interface.go new file mode 100644 index 0000000000..fd35c4df1a --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/interface.go @@ -0,0 +1,36 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "github.com/openshift/client-go/image/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Images returns a ImageInformer. + Images() ImageInformer + // ImageStreams returns a ImageStreamInformer. + ImageStreams() ImageStreamInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. 
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Images returns a ImageInformer. +func (v *version) Images() ImageInformer { + return &imageInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ImageStreams returns a ImageStreamInformer. +func (v *version) ImageStreams() ImageStreamInformer { + return &imageStreamInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 0000000000..c35dcbfa44 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,24 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + versioned "github.com/openshift/client-go/image/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/github.com/openshift/client-go/image/listers/image/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/image/listers/image/v1/expansion_generated.go new file mode 100644 index 0000000000..308b6db702 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/listers/image/v1/expansion_generated.go @@ -0,0 +1,31 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// ImageListerExpansion allows custom methods to be added to +// ImageLister. +type ImageListerExpansion interface{} + +// ImageStreamListerExpansion allows custom methods to be added to +// ImageStreamLister. +type ImageStreamListerExpansion interface{} + +// ImageStreamNamespaceListerExpansion allows custom methods to be added to +// ImageStreamNamespaceLister. +type ImageStreamNamespaceListerExpansion interface{} + +// ImageStreamTagListerExpansion allows custom methods to be added to +// ImageStreamTagLister. +type ImageStreamTagListerExpansion interface{} + +// ImageStreamTagNamespaceListerExpansion allows custom methods to be added to +// ImageStreamTagNamespaceLister. +type ImageStreamTagNamespaceListerExpansion interface{} + +// ImageTagListerExpansion allows custom methods to be added to +// ImageTagLister. +type ImageTagListerExpansion interface{} + +// ImageTagNamespaceListerExpansion allows custom methods to be added to +// ImageTagNamespaceLister. 
+type ImageTagNamespaceListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/image/listers/image/v1/image.go b/vendor/github.com/openshift/client-go/image/listers/image/v1/image.go new file mode 100644 index 0000000000..bb66460a77 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/listers/image/v1/image.go @@ -0,0 +1,52 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/image/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ImageLister helps list Images. +// All objects returned here must be treated as read-only. +type ImageLister interface { + // List lists all Images in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Image, err error) + // Get retrieves the Image from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.Image, error) + ImageListerExpansion +} + +// imageLister implements the ImageLister interface. +type imageLister struct { + indexer cache.Indexer +} + +// NewImageLister returns a new ImageLister. +func NewImageLister(indexer cache.Indexer) ImageLister { + return &imageLister{indexer: indexer} +} + +// List lists all Images in the indexer. +func (s *imageLister) List(selector labels.Selector) (ret []*v1.Image, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Image)) + }) + return ret, err +} + +// Get retrieves the Image from the index for a given name. +func (s *imageLister) Get(name string) (*v1.Image, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("image"), name) + } + return obj.(*v1.Image), nil +} diff --git a/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestream.go b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestream.go new file mode 100644 index 0000000000..02ed4da365 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestream.go @@ -0,0 +1,83 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/image/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ImageStreamLister helps list ImageStreams. +// All objects returned here must be treated as read-only. +type ImageStreamLister interface { + // List lists all ImageStreams in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ImageStream, err error) + // ImageStreams returns an object that can list and get ImageStreams. + ImageStreams(namespace string) ImageStreamNamespaceLister + ImageStreamListerExpansion +} + +// imageStreamLister implements the ImageStreamLister interface. +type imageStreamLister struct { + indexer cache.Indexer +} + +// NewImageStreamLister returns a new ImageStreamLister. +func NewImageStreamLister(indexer cache.Indexer) ImageStreamLister { + return &imageStreamLister{indexer: indexer} +} + +// List lists all ImageStreams in the indexer. 
+func (s *imageStreamLister) List(selector labels.Selector) (ret []*v1.ImageStream, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ImageStream)) + }) + return ret, err +} + +// ImageStreams returns an object that can list and get ImageStreams. +func (s *imageStreamLister) ImageStreams(namespace string) ImageStreamNamespaceLister { + return imageStreamNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ImageStreamNamespaceLister helps list and get ImageStreams. +// All objects returned here must be treated as read-only. +type ImageStreamNamespaceLister interface { + // List lists all ImageStreams in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ImageStream, err error) + // Get retrieves the ImageStream from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.ImageStream, error) + ImageStreamNamespaceListerExpansion +} + +// imageStreamNamespaceLister implements the ImageStreamNamespaceLister +// interface. +type imageStreamNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ImageStreams in the indexer for a given namespace. +func (s imageStreamNamespaceLister) List(selector labels.Selector) (ret []*v1.ImageStream, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ImageStream)) + }) + return ret, err +} + +// Get retrieves the ImageStream from the indexer for a given namespace and name. +func (s imageStreamNamespaceLister) Get(name string) (*v1.ImageStream, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("imagestream"), name) + } + return obj.(*v1.ImageStream), nil +} diff --git a/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestreamtag.go b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestreamtag.go new file mode 100644 index 0000000000..6042b27bbe --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestreamtag.go @@ -0,0 +1,83 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/image/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ImageStreamTagLister helps list ImageStreamTags. +// All objects returned here must be treated as read-only. +type ImageStreamTagLister interface { + // List lists all ImageStreamTags in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ImageStreamTag, err error) + // ImageStreamTags returns an object that can list and get ImageStreamTags. + ImageStreamTags(namespace string) ImageStreamTagNamespaceLister + ImageStreamTagListerExpansion +} + +// imageStreamTagLister implements the ImageStreamTagLister interface. +type imageStreamTagLister struct { + indexer cache.Indexer +} + +// NewImageStreamTagLister returns a new ImageStreamTagLister. +func NewImageStreamTagLister(indexer cache.Indexer) ImageStreamTagLister { + return &imageStreamTagLister{indexer: indexer} +} + +// List lists all ImageStreamTags in the indexer. 
+func (s *imageStreamTagLister) List(selector labels.Selector) (ret []*v1.ImageStreamTag, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ImageStreamTag)) + }) + return ret, err +} + +// ImageStreamTags returns an object that can list and get ImageStreamTags. +func (s *imageStreamTagLister) ImageStreamTags(namespace string) ImageStreamTagNamespaceLister { + return imageStreamTagNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ImageStreamTagNamespaceLister helps list and get ImageStreamTags. +// All objects returned here must be treated as read-only. +type ImageStreamTagNamespaceLister interface { + // List lists all ImageStreamTags in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ImageStreamTag, err error) + // Get retrieves the ImageStreamTag from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.ImageStreamTag, error) + ImageStreamTagNamespaceListerExpansion +} + +// imageStreamTagNamespaceLister implements the ImageStreamTagNamespaceLister +// interface. +type imageStreamTagNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ImageStreamTags in the indexer for a given namespace. +func (s imageStreamTagNamespaceLister) List(selector labels.Selector) (ret []*v1.ImageStreamTag, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ImageStreamTag)) + }) + return ret, err +} + +// Get retrieves the ImageStreamTag from the indexer for a given namespace and name. +func (s imageStreamTagNamespaceLister) Get(name string) (*v1.ImageStreamTag, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("imagestreamtag"), name) + } + return obj.(*v1.ImageStreamTag), nil +} diff --git a/vendor/github.com/openshift/client-go/image/listers/image/v1/imagetag.go b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagetag.go new file mode 100644 index 0000000000..bbc4518c23 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagetag.go @@ -0,0 +1,83 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/image/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ImageTagLister helps list ImageTags. +// All objects returned here must be treated as read-only. +type ImageTagLister interface { + // List lists all ImageTags in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ImageTag, err error) + // ImageTags returns an object that can list and get ImageTags. + ImageTags(namespace string) ImageTagNamespaceLister + ImageTagListerExpansion +} + +// imageTagLister implements the ImageTagLister interface. +type imageTagLister struct { + indexer cache.Indexer +} + +// NewImageTagLister returns a new ImageTagLister. +func NewImageTagLister(indexer cache.Indexer) ImageTagLister { + return &imageTagLister{indexer: indexer} +} + +// List lists all ImageTags in the indexer. 
+func (s *imageTagLister) List(selector labels.Selector) (ret []*v1.ImageTag, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ImageTag)) + }) + return ret, err +} + +// ImageTags returns an object that can list and get ImageTags. +func (s *imageTagLister) ImageTags(namespace string) ImageTagNamespaceLister { + return imageTagNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ImageTagNamespaceLister helps list and get ImageTags. +// All objects returned here must be treated as read-only. +type ImageTagNamespaceLister interface { + // List lists all ImageTags in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ImageTag, err error) + // Get retrieves the ImageTag from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.ImageTag, error) + ImageTagNamespaceListerExpansion +} + +// imageTagNamespaceLister implements the ImageTagNamespaceLister +// interface. +type imageTagNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ImageTags in the indexer for a given namespace. +func (s imageTagNamespaceLister) List(selector labels.Selector) (ret []*v1.ImageTag, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ImageTag)) + }) + return ret, err +} + +// Get retrieves the ImageTag from the indexer for a given namespace and name. +func (s imageTagNamespaceLister) Get(name string) (*v1.ImageTag, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("imagetag"), name) + } + return obj.(*v1.ImageTag), nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index fc98ba0579..7874b89f56 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -878,8 +878,16 @@ github.com/openshift/api/user/v1 ## explicit; go 1.20 github.com/openshift/client-go/build/applyconfigurations/build/v1 github.com/openshift/client-go/build/applyconfigurations/internal +github.com/openshift/client-go/build/clientset/versioned +github.com/openshift/client-go/build/clientset/versioned/fake github.com/openshift/client-go/build/clientset/versioned/scheme github.com/openshift/client-go/build/clientset/versioned/typed/build/v1 +github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake +github.com/openshift/client-go/build/informers/externalversions +github.com/openshift/client-go/build/informers/externalversions/build +github.com/openshift/client-go/build/informers/externalversions/build/v1 +github.com/openshift/client-go/build/informers/externalversions/internalinterfaces +github.com/openshift/client-go/build/listers/build/v1 github.com/openshift/client-go/config/applyconfigurations/config/v1 github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1 github.com/openshift/client-go/config/applyconfigurations/internal @@ -899,8 +907,16 @@ github.com/openshift/client-go/config/listers/config/v1 github.com/openshift/client-go/config/listers/config/v1alpha1 github.com/openshift/client-go/image/applyconfigurations/image/v1 github.com/openshift/client-go/image/applyconfigurations/internal +github.com/openshift/client-go/image/clientset/versioned +github.com/openshift/client-go/image/clientset/versioned/fake 
github.com/openshift/client-go/image/clientset/versioned/scheme github.com/openshift/client-go/image/clientset/versioned/typed/image/v1 +github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake +github.com/openshift/client-go/image/informers/externalversions +github.com/openshift/client-go/image/informers/externalversions/image +github.com/openshift/client-go/image/informers/externalversions/image/v1 +github.com/openshift/client-go/image/informers/externalversions/internalinterfaces +github.com/openshift/client-go/image/listers/image/v1 github.com/openshift/client-go/operator/applyconfigurations/internal github.com/openshift/client-go/operator/applyconfigurations/operator/v1 github.com/openshift/client-go/operator/applyconfigurations/operator/v1alpha1 From 692f814b914c9bad16917f7162dc7a6c04488d26 Mon Sep 17 00:00:00 2001 From: Zack Zlotnik Date: Thu, 6 Apr 2023 16:02:52 -0400 Subject: [PATCH 7/9] introduces buildcontroller with image builder backends --- internal/clients/builder.go | 10 + .../v1/types.go | 8 + .../Dockerfile.on-cluster-build-template | 33 + pkg/controller/build/assets/build.sh | 31 + pkg/controller/build/assets/wait.sh | 19 + pkg/controller/build/build_controller.go | 1849 ++++++++--------- pkg/controller/build/build_controller_test.go | 674 ++++++ pkg/controller/build/fixtures_test.go | 566 +++++ pkg/controller/build/helpers.go | 252 +++ pkg/controller/build/helpers_test.go | 163 ++ .../build/image_build_controller.go | 335 +++ pkg/controller/build/image_build_request.go | 423 ++++ .../build/image_build_request_test.go | 80 + pkg/controller/build/pod_build_controller.go | 359 ++++ pkg/controller/build/pool_build_resources.go | 100 - pkg/controller/common/constants.go | 33 +- pkg/controller/common/helpers.go | 13 +- pkg/daemon/constants/constants.go | 5 + 18 files changed, 3869 insertions(+), 1084 deletions(-) create mode 100644 pkg/controller/build/assets/Dockerfile.on-cluster-build-template create mode 100644 pkg/controller/build/assets/build.sh create mode 100644 pkg/controller/build/assets/wait.sh create mode 100644 pkg/controller/build/build_controller_test.go create mode 100644 pkg/controller/build/fixtures_test.go create mode 100644 pkg/controller/build/helpers.go create mode 100644 pkg/controller/build/helpers_test.go create mode 100644 pkg/controller/build/image_build_controller.go create mode 100644 pkg/controller/build/image_build_request.go create mode 100644 pkg/controller/build/image_build_request_test.go create mode 100644 pkg/controller/build/pod_build_controller.go delete mode 100644 pkg/controller/build/pool_build_resources.go diff --git a/internal/clients/builder.go b/internal/clients/builder.go index ea3d0aff26..561774d0c0 100644 --- a/internal/clients/builder.go +++ b/internal/clients/builder.go @@ -3,7 +3,9 @@ package clients import ( "os" + buildclientset "github.com/openshift/client-go/build/clientset/versioned" configclientset "github.com/openshift/client-go/config/clientset/versioned" + imageclientset "github.com/openshift/client-go/image/clientset/versioned" operatorclientset "github.com/openshift/client-go/operator/clientset/versioned" mcfgclientset "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned" apiext "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" @@ -56,6 +58,14 @@ func (cb *Builder) APIExtClientOrDie(name string) apiext.Interface { return apiext.NewForConfigOrDie(rest.AddUserAgent(cb.config, name)) } +func (cb *Builder) BuildClientOrDie(name string) 
buildclientset.Interface { + return buildclientset.NewForConfigOrDie(rest.AddUserAgent(cb.config, name)) +} + +func (cb *Builder) ImageClientOrDie(name string) imageclientset.Interface { + return imageclientset.NewForConfigOrDie(rest.AddUserAgent(cb.config, name)) +} + // GetBuilderConfig returns a copy of the builders *rest.Config func (cb *Builder) GetBuilderConfig() *rest.Config { return rest.CopyConfig(cb.config) diff --git a/pkg/apis/machineconfiguration.openshift.io/v1/types.go b/pkg/apis/machineconfiguration.openshift.io/v1/types.go index 8cfa3ba29d..3c780e7d5b 100644 --- a/pkg/apis/machineconfiguration.openshift.io/v1/types.go +++ b/pkg/apis/machineconfiguration.openshift.io/v1/types.go @@ -358,6 +358,14 @@ const ( // MachineConfigPoolDegraded is the overall status of the pool based, today, on whether we fail with NodeDegraded or RenderDegraded MachineConfigPoolDegraded MachineConfigPoolConditionType = "Degraded" + + MachineConfigPoolBuildPending MachineConfigPoolConditionType = "BuildPending" + + MachineConfigPoolBuilding MachineConfigPoolConditionType = "Building" + + MachineConfigPoolBuildSuccess MachineConfigPoolConditionType = "BuildSuccess" + + MachineConfigPoolBuildFailed MachineConfigPoolConditionType = "BuildFailed" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/pkg/controller/build/assets/Dockerfile.on-cluster-build-template b/pkg/controller/build/assets/Dockerfile.on-cluster-build-template new file mode 100644 index 0000000000..24ed777b95 --- /dev/null +++ b/pkg/controller/build/assets/Dockerfile.on-cluster-build-template @@ -0,0 +1,33 @@ +# This Dockerfile is not intended to be directly built. Instead, it is embedded +# within the Build Controller binary (see //go:embed) and templatized with +# certain options around base image pullspecs. +# +# Decode and extract the MachineConfig from the gzipped ConfigMap and move it +# into position. We do this in a separate stage so that we don't have the +# gzipped MachineConfig laying around. +FROM {{.BaseImage.Pullspec}} AS extract +COPY ./machineconfig/machineconfig.json.gz /tmp/machineconfig.json.gz +RUN mkdir -p /etc/machine-config-daemon && \ + cat /tmp/machineconfig.json.gz | base64 -d | gunzip - > /etc/machine-config-daemon/currentconfig + +{{if .ExtensionsImage.Pullspec}} +# Pull our extensions image. Not sure yet what / how this should be wired up +# though. Ideally, I'd like to use some Buildah tricks to have the extensions +# directory mounted into the container at build-time so that I don't have to +# copy the RPMs into the container, configure the repo, and do the +# installation. Alternatively, I'd have to start a pod with an HTTP server. +FROM {{.ExtensionsImage.Pullspec}} AS extensions +{{end}} + + +FROM {{.BaseImage.Pullspec}} AS final +# Copy the extracted MachineConfig into the expected place in the image. +COPY --from=extract /etc/machine-config-daemon/currentconfig /etc/machine-config-daemon/currentconfig +# Do the ignition live-apply, extracting the Ignition config from the MachineConfig. 
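+# The MachineConfig keeps its Ignition payload under .spec.config, so jq pulls
+# out just that sub-document for ignition-apply; the trailing "ostree container
+# commit" then cleans up and seals the layer for consumption by ostree-based hosts.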
+RUN exec -a ignition-apply /usr/lib/dracut/modules.d/30ignition/ignition --ignore-unsupported <(cat /etc/machine-config-daemon/currentconfig | jq '.spec.config') && \ + ostree container commit + +LABEL machineconfig={{.Pool.Spec.Configuration.Name}} +LABEL machineconfigpool={{.Pool.Name}} +LABEL releaseversion={{.ReleaseVersion}} +LABEL baseOSContainerImage={{.BaseImage.Pullspec}} diff --git a/pkg/controller/build/assets/build.sh b/pkg/controller/build/assets/build.sh new file mode 100644 index 0000000000..b93cf32554 --- /dev/null +++ b/pkg/controller/build/assets/build.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +# +# This script is not meant to be directly executed. Instead, it is embedded +# within the Build Controller binary (see //go:embed) and injected into a +# custom build pod. +set -xeuo + +build_context="$HOME/context" + +# Create a directory to hold our build context. +mkdir -p "$build_context/machineconfig" + +# Copy the Dockerfile and Machineconfigs from configmaps into our build context. +cp /tmp/dockerfile/Dockerfile "$build_context" +cp /tmp/machineconfig/machineconfig.json.gz "$build_context/machineconfig/" + +# Build our image using Buildah. +buildah bud \ + --storage-driver vfs \ + --authfile="$BASE_IMAGE_PULL_CREDS" \ + --tag "$TAG" \ + --file="$build_context/Dockerfile" "$build_context" + +# Push our built image. +buildah push \ + --storage-driver vfs \ + --authfile="$FINAL_IMAGE_PUSH_CREDS" \ + --cert-dir /var/run/secrets/kubernetes.io/serviceaccount "$TAG" + +# Signal that we're done. +echo "done" > /tmp/done/done diff --git a/pkg/controller/build/assets/wait.sh b/pkg/controller/build/assets/wait.sh new file mode 100644 index 0000000000..fbc059ba13 --- /dev/null +++ b/pkg/controller/build/assets/wait.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +# +# This script is not meant to be directly executed. Instead, it is embedded +# within the Build Controller binary (see //go:embed) and injected into a +# custom build pod. + +# Wait until the done file appears. +while [ ! -f "/tmp/done/done" ] +do + sleep 1 +done + +# Inspect the image to get the digest from the registry. This produces JSON +# output which we then scrape the pod logs for. This is why we're not using set +# -x for this script. 
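+# The inspect output includes a "Digest" field; the controller can combine that
+# with the repository portion of "$TAG" to form a digested pullspec (for
+# example registry.example.com/org/repo@sha256:...). The exact parsing lives
+# in the Go controller, not in this script.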
+skopeo inspect \ + --authfile "$FINAL_IMAGE_PUSH_CREDS" \ + --cert-dir /var/run/secrets/kubernetes.io/serviceaccount \ + "docker://$TAG" diff --git a/pkg/controller/build/build_controller.go b/pkg/controller/build/build_controller.go index 6b4b6ae170..77a0814103 100644 --- a/pkg/controller/build/build_controller.go +++ b/pkg/controller/build/build_controller.go @@ -1,19 +1,18 @@ package build import ( + "bytes" "context" - "encoding/json" "fmt" - "sort" + "strings" "time" - "github.com/golang/glog" + "github.com/containers/image/v5/docker/reference" + buildv1 "github.com/openshift/api/build/v1" mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/scheme" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/selection" + aggerrors "k8s.io/apimachinery/pkg/util/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" @@ -21,94 +20,105 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" - apierrors "k8s.io/apimachinery/pkg/api/errors" - - imageinformersv1 "github.com/openshift/client-go/image/informers/externalversions/image/v1" - imagelistersv1 "github.com/openshift/client-go/image/listers/image/v1" + buildinformers "github.com/openshift/client-go/build/informers/externalversions" buildinformersv1 "github.com/openshift/client-go/build/informers/externalversions/build/v1" - buildlistersv1 "github.com/openshift/client-go/build/listers/build/v1" buildclientset "github.com/openshift/client-go/build/clientset/versioned" - imageclientset "github.com/openshift/client-go/image/clientset/versioned" mcfgclientset "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned" + mcfginformers "github.com/openshift/machine-config-operator/pkg/generated/informers/externalversions" + mcfginformersv1 "github.com/openshift/machine-config-operator/pkg/generated/informers/externalversions/machineconfiguration.openshift.io/v1" mcfglistersv1 "github.com/openshift/machine-config-operator/pkg/generated/listers/machineconfiguration.openshift.io/v1" - buildv1 "github.com/openshift/api/build/v1" - imagev1 "github.com/openshift/api/image/v1" + coreinformers "k8s.io/client-go/informers" + coreinformersv1 "k8s.io/client-go/informers/core/v1" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - ign3types "github.com/coreos/ignition/v2/config/v3_2/types" - "github.com/vincent-petithory/dataurl" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + + "github.com/openshift/machine-config-operator/internal/clients" ) const ( + targetMachineConfigPoolLabel = "machineconfiguration.openshift.io/targetMachineConfigPool" + // TODO(zzlotnik): Is there a constant for this someplace else? + desiredConfigLabel = "machineconfiguration.openshift.io/desiredConfig" +) - // maxRetries is the number of times a machineconfig pool will be retried before it is dropped out of the queue. - // With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the times - // a machineconfig pool is going to be requeued: - // - // 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s - maxRetries = 15 +// on-cluster-build-config ConfigMap keys. 
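+//
+// An administrator is expected to provide a ConfigMap of roughly this shape
+// (values here are illustrative only):
+//
+//	apiVersion: v1
+//	kind: ConfigMap
+//	metadata:
+//	  name: on-cluster-build-config
+//	  namespace: openshift-machine-config-operator
+//	data:
+//	  baseImagePullSecretName: base-image-pull-secret
+//	  finalImagePushSecretName: final-image-push-secret
+//	  finalImagePullspec: registry.example.com/org/os-image:latest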
+const ( + // Name of ConfigMap which contains knobs for configuring the build controller. + onClusterBuildConfigMapName = "on-cluster-build-config" - // updateDelay is a pause to deal with churn in MachineConfigs; see - // https://github.com/openshift/machine-config-operator/issues/301 - updateDelay = 5 * time.Second - - machineConfigContentDockerfile = ` - # Multistage build, we need to grab the files from our config imagestream - FROM image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/{{.RenderedConfig }} AS machineconfig - - # We're actually basing on the "new format" image from the coreos base image stream - FROM image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/coreos - - # Pull in the files from our machineconfig stage - COPY --from=machineconfig /machine-config-ignition.json /etc/machine-config-ignition.json - - # Make the config drift checker happy - COPY --from=machineconfig /machine-config.json /etc/machine-config-daemon/currentconfig - - # Apply the config to the image - ENV container=1 - RUN exec -a ignition-apply /usr/lib/dracut/modules.d/30ignition/ignition --ignore-unsupported /etc/machine-config-ignition.json - - # Rebuild origin.d (I included an /etc/yum.repos.d/ file in my machineconfig so it could find the RPMS, that's why this works) - RUN rpm-ostree ex rebuild && rm -rf /var/cache /etc/rpm-ostree/origin.d - - # clean up. We want to be particularly strict so that live apply works - RUN rm /etc/machine-config-ignition.json - # TODO remove these hacks once we have - # https://github.com/coreos/rpm-ostree/pull/3544 - # and - # https://github.com/coreos/ignition/issues/1339 is fixed - # don't fail if wildcard has no matches - RUN bash -c "rm /usr/share/rpm/__db.*"; true - # to keep live apply working - RUN bash -c "if [[ -e /etc/systemd/system-preset/20-ignition.preset ]]; then sort /etc/systemd/system-preset/20-ignition.preset -o /etc/systemd/system-preset/20-ignition.preset; fi" - - # This is so we can get the machineconfig injected - ARG machineconfig=unknown - # Apply the injected machineconfig name as a label so node_controller can check it - LABEL machineconfig=$machineconfig - ` - dummyDockerfile = `FROM dummy` + // The on-cluster-build-config ConfigMap key which contains a K8s secret capable of pulling of the base OS image. + baseImagePullSecretNameConfigKey = "baseImagePullSecretName" + + // The on-cluster-build-config ConfigMap key which contains a K8s secret capable of pushing the final OS image. + finalImagePushSecretNameConfigKey = "finalImagePushSecretName" + + // The on-cluster-build-config ConfigMap key which contains the pullspec of where to push the final OS image (e.g., registry.hostname.com/org/repo:tag). + finalImagePullspecConfigKey = "finalImagePullspec" +) + +// machine-config-osimageurl ConfigMap keys. +const ( + // TODO: Is this a constant someplace else? + machineConfigOSImageURLConfigMapName = "machine-config-osimageurl" + + // The machine-config-osimageurl ConfigMap key which contains the pullspec of the base OS image (e.g., registry.hostname.com/org/repo:tag). + baseOSContainerImageConfigKey = "baseOSContainerImage" + + // The machine-config-osimageurl ConfigMap key which contains the pullspec of the base OS image (e.g., registry.hostname.com/org/repo:tag). + baseOSExtensionsContainerImageConfigKey = "baseOSExtensionsContainerImage" + + // The machine-config-osimageurl ConfigMap key which contains the current OpenShift release version. 
+ releaseVersionConfigKey = "releaseVersion" + + // The machine-config-osimageurl ConfigMap key which contains the osImageURL + // value. I don't think we actually use this anywhere though. + osImageURLConfigKey = "osImageURL" ) var ( // controllerKind contains the schema.GroupVersionKind for this controller type. + //nolint:varcheck,deadcode // This will be used eventually controllerKind = mcfgv1.SchemeGroupVersion.WithKind("MachineConfigPool") ) +//nolint:revive // If I name this ControllerConfig, that name will be overloaded :P +type BuildControllerConfig struct { + // updateDelay is a pause to deal with churn in MachineConfigs; see + // https://github.com/openshift/machine-config-operator/issues/301 + // Default: 5 seconds + UpdateDelay time.Duration + + // maxRetries is the number of times a machineconfig pool will be retried before it is dropped out of the queue. + // With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the times + // a machineconfig pool is going to be requeued: + // + // 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s + // Default: 5 + MaxRetries int +} + +type ImageBuilder interface { + Run(context.Context, int) + StartBuild(ImageBuildRequest) (*corev1.ObjectReference, error) + IsBuildRunning(*mcfgv1.MachineConfigPool) (bool, error) + DeleteBuildObject(*mcfgv1.MachineConfigPool) error + FinalPullspec(*mcfgv1.MachineConfigPool) (string, error) +} + // Controller defines the build controller. type Controller struct { - client mcfgclientset.Interface - imageclient imageclientset.Interface - buildclient buildclientset.Interface - kubeclient clientset.Interface + *Clients + *informers eventRecorder record.EventRecorder @@ -117,100 +127,163 @@ type Controller struct { ccLister mcfglistersv1.ControllerConfigLister mcpLister mcfglistersv1.MachineConfigPoolLister - bLister buildlistersv1.BuildLister - bcLister buildlistersv1.BuildConfigLister - isLister imagelistersv1.ImageStreamLister ccListerSynced cache.InformerSynced mcpListerSynced cache.InformerSynced - bListerSynced cache.InformerSynced - bcListerSynced cache.InformerSynced - isListerSynced cache.InformerSynced + podListerSynced cache.InformerSynced queue workqueue.RateLimitingInterface + + config BuildControllerConfig + imageBuilder ImageBuilder +} + +// Creates a BuildControllerConfig with sensible production defaults. +func DefaultBuildControllerConfig() BuildControllerConfig { + return BuildControllerConfig{ + MaxRetries: 5, + UpdateDelay: time.Second * 5, + } +} + +// Holds each of the clients used by the Build Controller and its subcontrollers. +type Clients struct { + mcfgclient mcfgclientset.Interface + kubeclient clientset.Interface + buildclient buildclientset.Interface +} + +func NewClientsFromControllerContext(ctrlCtx *ctrlcommon.ControllerContext) *Clients { + return NewClients(ctrlCtx.ClientBuilder) +} + +func NewClients(cb *clients.Builder) *Clients { + return &Clients{ + mcfgclient: cb.MachineConfigClientOrDie("machine-os-builder"), + kubeclient: cb.KubeClientOrDie("machine-os-builder"), + buildclient: cb.BuildClientOrDie("machine-os-builder"), + } +} + +// Holds and starts each of the infomrers used by the Build Controller and its subcontrollers. 
+type informers struct { + ccInformer mcfginformersv1.ControllerConfigInformer + mcpInformer mcfginformersv1.MachineConfigPoolInformer + buildInformer buildinformersv1.BuildInformer + podInformer coreinformersv1.PodInformer + toStart []interface{ Start(<-chan struct{}) } +} + +// Starts the informers, wiring them up to the provided context. +func (i *informers) start(ctx context.Context) { + for _, startable := range i.toStart { + startable.Start(ctx.Done()) + } } -// New returns a new node controller. -func New( - ccInformer mcfginformersv1.ControllerConfigInformer, - mcpInformer mcfginformersv1.MachineConfigPoolInformer, - isInformer imageinformersv1.ImageStreamInformer, - bcInformer buildinformersv1.BuildConfigInformer, - bInformer buildinformersv1.BuildInformer, - mcfgClient mcfgclientset.Interface, - kubeClient clientset.Interface, - imageClient imageclientset.Interface, - buildClient buildclientset.Interface, +// Creates new informer instances from a given Clients(set). +func newInformers(bcc *Clients) *informers { + ccInformer := mcfginformers.NewSharedInformerFactory(bcc.mcfgclient, 0) + mcpInformer := mcfginformers.NewSharedInformerFactory(bcc.mcfgclient, 0) + buildInformer := buildinformers.NewSharedInformerFactoryWithOptions(bcc.buildclient, 0, buildinformers.WithNamespace(ctrlcommon.MCONamespace)) + podInformer := coreinformers.NewSharedInformerFactoryWithOptions(bcc.kubeclient, 0, coreinformers.WithNamespace(ctrlcommon.MCONamespace)) + + return &informers{ + ccInformer: ccInformer.Machineconfiguration().V1().ControllerConfigs(), + mcpInformer: mcpInformer.Machineconfiguration().V1().MachineConfigPools(), + buildInformer: buildInformer.Build().V1().Builds(), + podInformer: podInformer.Core().V1().Pods(), + toStart: []interface{ Start(<-chan struct{}) }{ + ccInformer, + mcpInformer, + buildInformer, + podInformer, + }, + } +} + +// Creates a basic Build Controller instance without configuring an ImageBuilder. 
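+// The imageBuilder field is left unset here; the NewWith* constructors below
+// plug in either the custom build pod backend or the OpenShift Build backend.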
+func newBuildController( + ctrlConfig BuildControllerConfig, + clients *Clients, ) *Controller { eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) - eventBroadcaster.StartRecordingToSink(&coreclientsetv1.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) + eventBroadcaster.StartLogging(klog.Infof) + eventBroadcaster.StartRecordingToSink(&coreclientsetv1.EventSinkImpl{Interface: clients.kubeclient.CoreV1().Events("")}) ctrl := &Controller{ - client: mcfgClient, - imageclient: imageClient, - kubeclient: kubeClient, - buildclient: buildClient, - eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "machineconfigcontroller-buildcontroller"}), - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "machineconfigcontroller-buildcontroller"), + informers: newInformers(clients), + Clients: clients, + eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "machineosbuilder-buildcontroller"}), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "machineosbuilder-buildcontroller"), + config: ctrlConfig, } - mcpInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + ctrl.mcpInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: ctrl.addMachineConfigPool, UpdateFunc: ctrl.updateMachineConfigPool, DeleteFunc: ctrl.deleteMachineConfigPool, }) - bInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: ctrl.addBuild, - UpdateFunc: ctrl.updateBuild, - DeleteFunc: ctrl.deleteBuild, - }) - bcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: ctrl.addBuildConfig, - UpdateFunc: ctrl.updateBuildConfig, - DeleteFunc: ctrl.deleteBuildConfig, - }) - isInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: ctrl.addImageStream, - UpdateFunc: ctrl.updateImageStream, - DeleteFunc: ctrl.deleteImageStream, - }) ctrl.syncHandler = ctrl.syncMachineConfigPool ctrl.enqueueMachineConfigPool = ctrl.enqueueDefault - ctrl.ccLister = ccInformer.Lister() - ctrl.mcpLister = mcpInformer.Lister() - ctrl.isLister = isInformer.Lister() - ctrl.bcLister = bcInformer.Lister() - ctrl.bLister = bInformer.Lister() + ctrl.ccLister = ctrl.ccInformer.Lister() + ctrl.mcpLister = ctrl.mcpInformer.Lister() - ctrl.ccListerSynced = ccInformer.Informer().HasSynced - ctrl.mcpListerSynced = mcpInformer.Informer().HasSynced - ctrl.isListerSynced = isInformer.Informer().HasSynced - ctrl.bcListerSynced = bcInformer.Informer().HasSynced - ctrl.bListerSynced = bInformer.Informer().HasSynced + ctrl.ccListerSynced = ctrl.ccInformer.Informer().HasSynced + ctrl.mcpListerSynced = ctrl.mcpInformer.Informer().HasSynced + + return ctrl +} + +// Creates a Build Controller instance with a custom pod builder implementation +// for the ImageBuilder. +func NewWithCustomPodBuilder( + ctrlConfig BuildControllerConfig, + clients *Clients, +) *Controller { + ctrl := newBuildController(ctrlConfig, clients) + ctrl.imageBuilder = newPodBuildController(ctrlConfig, clients, ctrl.customBuildPodUpdater) + return ctrl +} +// Creates a Build Controller instance with an OpenShift Image Builder +// implementation for the ImageBuilder. 
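+//
+// A caller wiring this up (for example the machine-os-builder entrypoint)
+// might do, roughly:
+//
+//	cfg := DefaultBuildControllerConfig()
+//	ctrl := NewWithImageBuilder(cfg, NewClients(clientBuilder))
+//	go ctrl.Run(ctx, 5)
+//
+// where clientBuilder is a *clients.Builder and ctx is a context.Context.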
+func NewWithImageBuilder( + ctrlConfig BuildControllerConfig, + clients *Clients, +) *Controller { + ctrl := newBuildController(ctrlConfig, clients) + ctrl.imageBuilder = newImageBuildController(ctrlConfig, clients, ctrl.imageBuildUpdater) return ctrl } // Run executes the render controller. -func (ctrl *Controller) Run(workers int, stopCh <-chan struct{}) { +// TODO: Make this use a context instead of a stop channel. +func (ctrl *Controller) Run(parentCtx context.Context, workers int) { + klog.Info("Starting MachineOSBuilder-BuildController") + defer klog.Info("Shutting down MachineOSBuilder-BuildController") + + // Not sure if I actually need a child context here or not. + ctx, cancel := context.WithCancel(parentCtx) defer utilruntime.HandleCrash() defer ctrl.queue.ShutDown() + defer cancel() - if !cache.WaitForCacheSync(stopCh, ctrl.mcpListerSynced, ctrl.ccListerSynced, ctrl.bListerSynced, ctrl.bcListerSynced, ctrl.isListerSynced) { + ctrl.informers.start(ctx) + + if !cache.WaitForCacheSync(ctx.Done(), ctrl.mcpListerSynced, ctrl.ccListerSynced) { return } - glog.Info("Starting MachineConfigController-BuildController") - defer glog.Info("Shutting down MachineConfigController-BuildController") + go ctrl.imageBuilder.Run(ctx, workers) for i := 0; i < workers; i++ { - go wait.Until(ctrl.worker, time.Second, stopCh) + go wait.Until(ctrl.worker, time.Second, ctx.Done()) } - <-stopCh + <-ctx.Done() } func (ctrl *Controller) enqueue(pool *mcfgv1.MachineConfigPool) { @@ -246,7 +319,7 @@ func (ctrl *Controller) enqueueAfter(pool *mcfgv1.MachineConfigPool, after time. // enqueueDefault calls a default enqueue function func (ctrl *Controller) enqueueDefault(pool *mcfgv1.MachineConfigPool) { - ctrl.enqueueAfter(pool, updateDelay) + ctrl.enqueueAfter(pool, ctrl.config.UpdateDelay) } // worker runs a worker thread that just dequeues items, processes them, and marks them done. @@ -269,33 +342,113 @@ func (ctrl *Controller) processNextWorkItem() bool { return true } +// Reconciles the MachineConfigPool state with the state of an OpenShift Image +// Builder object. +func (ctrl *Controller) imageBuildUpdater(build *buildv1.Build) error { + pool, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), build.Labels[targetMachineConfigPoolLabel], metav1.GetOptions{}) + if err != nil { + return err + } + + klog.Infof("Build (%s) is %s", build.Name, build.Status.Phase) + + objRef := toObjectRef(build) + + switch build.Status.Phase { + case buildv1.BuildPhaseNew, buildv1.BuildPhasePending: + if !mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuildPending) { + err = ctrl.markBuildPendingWithObjectRef(pool, *objRef) + } + case buildv1.BuildPhaseRunning: + // If we're running, then there's nothing to do right now. + if !mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuilding) { + err = ctrl.markBuildInProgress(pool) + } + case buildv1.BuildPhaseComplete: + // If we've succeeded, we need to update the pool to indicate that. + if !mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuildSuccess) { + err = ctrl.markBuildSucceeded(pool) + } + case buildv1.BuildPhaseFailed, buildv1.BuildPhaseError, buildv1.BuildPhaseCancelled: + // If we've failed, errored, or cancelled, we need to update the pool to indicate that. 
+ if !mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuildFailed) { + err = ctrl.markBuildFailed(pool) + } + } + + if err != nil { + return err + } + + ctrl.enqueueMachineConfigPool(pool) + return nil +} + +// Reconciles the MachineConfigPool state with the state of a custom pod object. +func (ctrl *Controller) customBuildPodUpdater(pod *corev1.Pod) error { + pool, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), pod.Labels[targetMachineConfigPoolLabel], metav1.GetOptions{}) + if err != nil { + return err + } + + klog.Infof("Build pod (%s) is %s", pod.Name, pod.Status.Phase) + + switch pod.Status.Phase { + case corev1.PodPending: + if !mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuildPending) { + objRef := toObjectRef(pod) + err = ctrl.markBuildPendingWithObjectRef(pool, *objRef) + } + case corev1.PodRunning: + // If we're running, then there's nothing to do right now. + if !mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuilding) { + err = ctrl.markBuildInProgress(pool) + } + case corev1.PodSucceeded: + // If we've succeeded, we need to update the pool to indicate that. + if !mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuildSuccess) { + err = ctrl.markBuildSucceeded(pool) + } + case corev1.PodFailed: + // If we've failed, we need to update the pool to indicate that. + if !mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuildFailed) { + err = ctrl.markBuildFailed(pool) + } + } + + if err != nil { + return err + } + + ctrl.enqueueMachineConfigPool(pool) + return nil +} + func (ctrl *Controller) handleErr(err error, key interface{}) { if err == nil { ctrl.queue.Forget(key) return } - if ctrl.queue.NumRequeues(key) < maxRetries { - glog.V(2).Infof("Error syncing machineconfigpool %v: %v", key, err) + if ctrl.queue.NumRequeues(key) < ctrl.config.MaxRetries { + klog.V(2).Infof("Error syncing machineconfigpool %v: %v", key, err) ctrl.queue.AddRateLimited(key) return } utilruntime.HandleError(err) - glog.V(2).Infof("Dropping machineconfigpool %q out of the queue: %v", key, err) + klog.V(2).Infof("Dropping machineconfigpool %q out of the queue: %v", key, err) ctrl.queue.Forget(key) ctrl.queue.AddAfter(key, 1*time.Minute) } -// TODO(jkyros): the question we're trying to answer is "is there any content that has changed that is not reflected in the current image for the pool" - // syncMachineConfigPool will sync the machineconfig pool with the given key. // This function is not meant to be invoked concurrently with the same key. 
func (ctrl *Controller) syncMachineConfigPool(key string) error { startTime := time.Now() - glog.V(4).Infof("Started syncing machineconfigpool %q (%v)", key, startTime) + klog.V(4).Infof("Started syncing machineconfigpool %q (%v)", key, startTime) defer func() { - glog.V(4).Infof("Finished syncing machineconfigpool %q (%v)", key, time.Since(startTime)) + klog.V(4).Infof("Finished syncing machineconfigpool %q (%v)", key, time.Since(startTime)) }() _, name, err := cache.SplitMetaNamespaceKey(key) @@ -303,1032 +456,842 @@ func (ctrl *Controller) syncMachineConfigPool(key string) error { return err } machineconfigpool, err := ctrl.mcpLister.Get(name) - if errors.IsNotFound(err) { - glog.V(2).Infof("MachineConfigPool %v has been deleted", key) + if k8serrors.IsNotFound(err) { + klog.V(2).Infof("MachineConfigPool %v has been deleted", key) return nil } if err != nil { return err } - // Make sure the shared base CoreOS imagestream exists - // TODO(jkyros): There seems to be a delay (probably the time it takes to pull the image) before the image tag shows up. As a result, - // when we create our base imagestream later, it's empty until this gets populated and triggers it. - _, err = ctrl.ensureCoreOSImageStream() + // TODO: Doing a deep copy of this pool object from our cache and using it to + // determine our next course of action sometimes causes a race condition. I'm + // not sure if it's better to get a current copy from the API server or what. + // pool := machineconfigpool.DeepCopy() + pool, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), machineconfigpool.Name, metav1.GetOptions{}) if err != nil { return err } - pool := machineconfigpool.DeepCopy() + // Not a layered pool, so stop here. + if !ctrlcommon.IsLayeredPool(pool) { + klog.V(4).Infof("MachineConfigPool %s is not opted-in for layering, ignoring", pool.Name) + return nil + } - // TODO(jkyros): take this out when we decide actual UX, this just forces the layered label on to - // the pool if its name is the string "layered" - if pool.Name == "layered" { - if pool.Labels == nil { - pool.Labels = map[string]string{} + switch { + case mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolDegraded): + klog.V(4).Infof("MachineConfigPool %s is degraded, requeueing", pool.Name) + ctrl.enqueueMachineConfigPool(pool) + return nil + case mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolRenderDegraded): + klog.V(4).Infof("MachineConfigPool %s is render degraded, requeueing", pool.Name) + ctrl.enqueueMachineConfigPool(pool) + return nil + case mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuildPending): + klog.V(4).Infof("MachineConfigPool %s is build pending", pool.Name) + return nil + case mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuilding): + klog.V(4).Infof("MachineConfigPool %s is building", pool.Name) + return nil + case mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuildSuccess): + klog.V(4).Infof("MachineConfigPool %s has successfully built", pool.Name) + return nil + default: + shouldBuild, err := shouldWeDoABuild(ctrl.imageBuilder, pool, pool) + if err != nil { + return fmt.Errorf("could not determine if a build is required for MachineConfigPool %q: %w", pool.Name, err) } - // TODO(jkyros): we'll see if we like this, but we need a way to specify which imagestream it should use - 
pool.Labels[ctrlcommon.ExperimentalLayeringPoolLabel] = "" - // TODO(jkyros): Don't update this here. We're just doing this now to "steal" the pool from render_controller - _, err = ctrl.client.MachineconfigurationV1().MachineConfigPools().Update(context.TODO(), pool, metav1.UpdateOptions{}) - if err != nil { - return err + if shouldBuild { + return ctrl.startBuildForMachineConfigPool(pool) } - } - // If this pool isn't managed by us, the render controller will handle it - if !ctrlcommon.IsLayeredPool(pool) { - return nil + klog.V(4).Infof("Nothing to do for pool %q", pool.Name) } - // TODO(jkyros): I *could* have the build controller do the config rendering here for the pools - // that the build controller manages, but there is no escaping at least some modification to the render - // controller telling it to ignore the pools the build controller is managing. + // For everything else + return ctrl.syncAvailableStatus(pool) +} - // Stuff an entitlements machineconfig into the pool +// Marks a given MachineConfigPool as a failed build. +func (ctrl *Controller) markBuildFailed(pool *mcfgv1.MachineConfigPool) error { + klog.Errorf("Build failed for pool %s", pool.Name) - ctrl.experimentalAddEntitlements(pool) + setMCPBuildConditions(pool, []mcfgv1.MachineConfigPoolCondition{ + { + Type: mcfgv1.MachineConfigPoolBuildFailed, + Reason: "BuildFailed", + Status: corev1.ConditionTrue, + }, + { + Type: mcfgv1.MachineConfigPoolBuildSuccess, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolBuilding, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolBuildPending, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolDegraded, + Status: corev1.ConditionTrue, + }, + }) - glog.V(2).Infof("Ensuring image streams exist for pool %s", pool.Name) + return ctrl.syncFailingStatus(pool, fmt.Errorf("build failed")) +} - // Get the mapping/list of resources this pool should ensure and own - pbr := PoolBuildResources(pool) +// Marks a given MachineConfigPool as the build is in progress. +func (ctrl *Controller) markBuildInProgress(pool *mcfgv1.MachineConfigPool) error { + klog.Infof("Build in progress for MachineConfigPool %s, config %s", pool.Name, pool.Spec.Configuration.Name) - // Our list of imagestreams we need to ensure exists - var ensureImageStreams = []string{ - pbr.ImageStream.Base, - pbr.ImageStream.ExternalBase, - pbr.ImageStream.RenderedConfig, - pbr.ImageStream.Content, - pbr.ImageStream.CustomContent, - pbr.ImageStream.External, - } + setMCPBuildConditions(pool, []mcfgv1.MachineConfigPoolCondition{ + { + Type: mcfgv1.MachineConfigPoolBuildFailed, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolBuildSuccess, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolBuilding, + Reason: "BuildRunning", + Status: corev1.ConditionTrue, + }, + { + Type: mcfgv1.MachineConfigPoolBuildPending, + Status: corev1.ConditionFalse, + }, + }) - // Make sure the imagestreams exist so we can populate them with our image builds - for _, imageStreamName := range ensureImageStreams { - _, err := ctrl.ensureImageStreamForPool(pool, imageStreamName, pbr) - if err != nil { - // I don't know if it existed or not, I couldn't get it - return fmt.Errorf("Failed to ensure ImageStream %s: %w", imageStreamName, err) + return ctrl.syncAvailableStatus(pool) +} + +// Deletes the ephemeral objects we created to perform this specific build. 
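postBuildCleanup, defined next, wraps each deletion in a maybeIgnoreMissing closure so the same cleanup can run again (for example on opt-out) without failing on objects that are already gone. The ignoreIsNotFoundErr helper it leans on is not part of this hunk; a minimal sketch of what it presumably does, built only on the apimachinery errors package:

package main

import (
	"fmt"

	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// ignoreIsNotFoundErr (sketch): "not found" counts as success, anything else
// is passed through unchanged.
func ignoreIsNotFoundErr(err error) error {
	if err != nil && !k8serrors.IsNotFound(err) {
		return err
	}
	return nil
}

func main() {
	notFound := k8serrors.NewNotFound(schema.GroupResource{Resource: "configmaps"}, "mc-configmap")
	fmt.Println(ignoreIsNotFoundErr(notFound))           // <nil>: already deleted is fine
	fmt.Println(ignoreIsNotFoundErr(fmt.Errorf("boom"))) // boom: real failures still surface
}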
+func (ctrl *Controller) postBuildCleanup(pool *mcfgv1.MachineConfigPool, ignoreMissing bool) error { + // Delete the actual build object itself. + deleteBuildObject := func() error { + err := ctrl.imageBuilder.DeleteBuildObject(pool) + + if err == nil { + klog.Infof("Deleted build object %s", newImageBuildRequest(pool).getBuildName()) } + return err } - // Magically switch imagestreams if custom/external end up with images in them - err = ctrl.ensureImageStreamPrecedenceIfPopulated(pool) - if err != nil { - return fmt.Errorf("Could not ensure proper imagestream was selected for pool %s: %w", pool.Name, err) - } + // Delete the ConfigMap containing the MachineConfig. + deleteMCConfigMap := func() error { + ibr := newImageBuildRequest(pool) - // TODO(jkyros): we could have just now set our imagestream based on changes, but we might not have a build yet + err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Delete(context.TODO(), ibr.getMCConfigMapName(), metav1.DeleteOptions{}) - // Figure out which imagestream the pool is deploying from - poolImageStreamName, err := ctrlcommon.GetPoolImageStream(pool) - if err != nil { - return err - } + if err == nil { + klog.Infof("Deleted MachineConfig ConfigMap %s for build %s", ibr.getMCConfigMapName(), ibr.getBuildName()) + } - // Get the actual image stream object for that imagestream - poolImageStream, err := ctrl.isLister.ImageStreams(ctrlcommon.MCONamespace).Get(poolImageStreamName) - if err != nil { return err } - // Get the most recent image from that stream if it exists - // TODO(jkyros): this can be nil - mostRecentPoolImage := ctrl.getMostRecentImageTagForImageStream(poolImageStream, "latest") + // Delete the ConfigMap containing the Dockerfile. + deleteDockerfileConfigMap := func() error { + ibr := newImageBuildRequest(pool) - // Our list of imagestreams we need to ensure exists - var ensureBuildConfigs = []PoolBuildConfig{ - pbr.BuildConfig.Content, - pbr.BuildConfig.CustomContent, - } - - for num, pbc := range ensureBuildConfigs { + err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Delete(context.TODO(), ibr.getDockerfileConfigMapName(), metav1.DeleteOptions{}) - checkBuildConfig, err := ctrl.ensureBuildConfigForPool(pool, &ensureBuildConfigs[num]) - if err != nil { - // I don't know if it existed or not, I couldn't get it - return fmt.Errorf("Failed to ensure BuildConfig %s: %w", pbc.Name, err) + if err == nil { + klog.Infof("Deleted Dockerfile ConfigMap %s for build %s", ibr.getDockerfileConfigMapName(), ibr.getBuildName()) } - // We're looking for builds that belong to this buildconfig, so craft a filter - ourBuildReq, err := labels.NewRequirement("buildconfig", selection.In, []string{checkBuildConfig.Name}) - if err != nil { - return err - } - // Make a selector based on our requirement - ourBuildSelector := labels.NewSelector().Add(*ourBuildReq) + return err + } - // Retrieve those builds that belong to this buildconfig - builds, err := ctrl.bLister.Builds(ctrlcommon.MCONamespace).List(ourBuildSelector) - if err != nil { - return err + maybeIgnoreMissing := func(f func() error) func() error { + return func() error { + if ignoreMissing { + return ignoreIsNotFoundErr(f()) + } + + return f() } + } - // If builds exist for this buildconfig - if len(builds) > 0 { - // Sort the builds in descending order, we want the newest first - sort.Slice(builds, func(i, j int) bool { - return builds[i].CreationTimestamp.After(builds[j].CreationTimestamp.Time) - }) - - // This is the newest, and we know it can't be 
outof bounds because of how we got here - // TODO(jkyros): If a newer build has been queued, should we terminate the old one? - mostRecentBuild := builds[0] - - // TODO(jkyros): We need to find a "level triggered" way to figure out if the image we have is representative - // of the state of our "build ladder" so we know if a failed build is a problem or not. Ultimately a metadata problem. - if mostRecentPoolImage == nil || mostRecentPoolImage.Created.Before(&mostRecentBuild.CreationTimestamp) { - // If they failed/are in bad phase, we're probably in trouble - switch mostRecentBuild.Status.Phase { - case buildv1.BuildPhaseError: - glog.Errorf("Need to degrade, build %s is %s", mostRecentBuild.Name, mostRecentBuild.Status.Phase) - case buildv1.BuildPhaseFailed: - glog.Errorf("Need to degrade, build %s is %s", mostRecentBuild.Name, mostRecentBuild.Status.Phase) - case buildv1.BuildPhaseCancelled: - glog.Errorf("Need to degrade, build %s is %s", mostRecentBuild.Name, mostRecentBuild.Status.Phase) - case buildv1.BuildPhaseComplete: - glog.Errorf("A build %s has completed for pool %s", mostRecentBuild.Name, mostRecentBuild.Status.Phase) - default: - // If they worked okay, we're building, we can update our status? - glog.Infof("A build %s is in progress (%s) for pool %s", mostRecentBuild.Name, mostRecentBuild.Status.Phase, pool.Name) - } + // If *any* of these we fail, we want to emit an error. If *all* fail, we + // want all of the error messages. + return aggerrors.AggregateGoroutines( + maybeIgnoreMissing(deleteBuildObject), + maybeIgnoreMissing(deleteMCConfigMap), + maybeIgnoreMissing(deleteDockerfileConfigMap), + ) +} - } - } +// Marks a given MachineConfigPool as build successful and cleans up after itself. +func (ctrl *Controller) markBuildSucceeded(pool *mcfgv1.MachineConfigPool) error { + klog.Infof("Build succeeded for MachineConfigPool %s, config %s", pool.Name, pool.Spec.Configuration.Name) + // Get the final image pullspec. + imagePullspec, err := ctrl.imageBuilder.FinalPullspec(pool) + if err != nil { + return fmt.Errorf("could not get final image pullspec for pool %s: %w", pool.Name, err) } - // Do we have an image stream for this pool? We should if we got here. - is, err := ctrl.isLister.ImageStreams(ctrlcommon.MCONamespace).Get(poolImageStream.Name) - if apierrors.IsNotFound(err) { - // TODO(jkyros): As cgwalters points out, this should probably degrade because it should exist - glog.Warningf("ImageStream for %s does not exist (yet?): %s", pool.Name, err) - } else { - // If there is an image ready, annotate the pool with it so node controller can use it if it's the right one - err := ctrl.annotatePoolWithNewestImage(is, pool) - if err != nil { - return err - } + if imagePullspec == "" { + return fmt.Errorf("image pullspec empty for pool %s", pool.Name) } - // TODO(jkyros): Only update if we changed, don't always update. Also, if we update here and then update status again, that seems - // wasteful. - _, err = ctrl.client.MachineconfigurationV1().MachineConfigPools().Update(context.TODO(), pool, metav1.UpdateOptions{}) - if err != nil { - return err + // Perform the post-build cleanup. + if err := ctrl.postBuildCleanup(pool, false); err != nil { + return fmt.Errorf("could not do post-build cleanup: %w", err) } - return ctrl.syncAvailableStatus(pool) - -} + // Set the annotation or field to point to the newly-built container image. 
+ klog.V(4).Infof("Setting new image pullspec for %s to %s", pool.Name, imagePullspec) + if pool.Annotations == nil { + pool.Annotations = map[string]string{} + } + pool.Annotations[ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey] = imagePullspec -// Machine Config Pools + // Remove the build object reference from the MachineConfigPool since we're + // not using it anymore. + deleteBuildRefFromMachineConfigPool(pool) -func (ctrl *Controller) addMachineConfigPool(obj interface{}) { - pool := obj.(*mcfgv1.MachineConfigPool) - glog.V(4).Infof("Adding MachineConfigPool %s", pool.Name) - ctrl.enqueueMachineConfigPool(pool) + // Adjust the MachineConfigPool status to indicate success. + setMCPBuildConditions(pool, []mcfgv1.MachineConfigPoolCondition{ + { + Type: mcfgv1.MachineConfigPoolBuildFailed, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolBuildSuccess, + Reason: "BuildSucceeded", + Status: corev1.ConditionTrue, + }, + { + Type: mcfgv1.MachineConfigPoolBuilding, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolDegraded, + Status: corev1.ConditionFalse, + }, + }) + // Perform the MachineConfigPool update. + return ctrl.updatePoolAndSyncStatus(pool, ctrl.syncAvailableStatus) } -func (ctrl *Controller) updateMachineConfigPool(old, cur interface{}) { - oldPool := old.(*mcfgv1.MachineConfigPool) - curPool := cur.(*mcfgv1.MachineConfigPool) +// Marks a given MachineConfigPool as build pending. +func (ctrl *Controller) markBuildPendingWithObjectRef(pool *mcfgv1.MachineConfigPool, objRef corev1.ObjectReference) error { + klog.Infof("Build for %s marked pending with object reference %v", pool.Name, objRef) - glog.V(4).Infof("Updating MachineConfigPool %s", oldPool.Name) - ctrl.enqueueMachineConfigPool(curPool) -} + setMCPBuildConditions(pool, []mcfgv1.MachineConfigPoolCondition{ + { + Type: mcfgv1.MachineConfigPoolBuildFailed, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolBuildSuccess, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolBuilding, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolBuildPending, + Reason: "BuildPending", + Status: corev1.ConditionTrue, + }, + }) -func (ctrl *Controller) deleteMachineConfigPool(obj interface{}) { - pool, ok := obj.(*mcfgv1.MachineConfigPool) - if !ok { - tombstone, ok := obj.(cache.DeletedFinalStateUnknown) - if !ok { - utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj)) - return - } - pool, ok = tombstone.Obj.(*mcfgv1.MachineConfigPool) - if !ok { - utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a MachineConfigPool %#v", obj)) - return - } + // If the MachineConfigPool has the build object reference, we just want to + // update the MachineConfigPool's status. + if machineConfigPoolHasObjectRef(pool, objRef) { + return ctrl.syncAvailableStatus(pool) } - glog.V(4).Infof("Deleting MachineConfigPool %s", pool.Name) + + // If we added the build object reference, we need to update both the + // MachineConfigPool itself and its status. 
+ addObjectRefIfMissing(pool, objRef) + return ctrl.updatePoolAndSyncStatus(pool, ctrl.syncAvailableStatus) } -// ImagStreams +func (ctrl *Controller) markBuildPending(pool *mcfgv1.MachineConfigPool) error { + klog.Infof("Build for %s marked pending", pool.Name) -func (ctrl *Controller) addImageStream(obj interface{}) { + setMCPBuildConditions(pool, []mcfgv1.MachineConfigPoolCondition{ + { + Type: mcfgv1.MachineConfigPoolBuildFailed, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolBuildSuccess, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolBuilding, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolBuildPending, + Reason: "BuildPending", + Status: corev1.ConditionTrue, + }, + }) + return ctrl.syncAvailableStatus(pool) } -func (ctrl *Controller) updateImageStream(old, cur interface{}) { - imagestream := cur.(*imagev1.ImageStream) - controllerRef := metav1.GetControllerOf(imagestream) - - if controllerRef != nil { +func (ctrl *Controller) updatePoolAndSyncStatus(pool *mcfgv1.MachineConfigPool, statusFunc func(*mcfgv1.MachineConfigPool) error) error { + // We need to do an API server round-trip to ensure all of our mutations get + // propagated. + updatedPool, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Update(context.TODO(), pool, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("could not update MachineConfigPool %q: %w", pool.Name, err) + } - if pool := ctrl.resolveControllerRef(controllerRef); pool != nil { + updatedPool.Status = pool.Status - glog.Infof("ImageStream %s changed for pool %s", imagestream.Name, pool.Name) + return statusFunc(updatedPool) +} - // TODO(jkyros): This is a race I usually win, but I won't always, and we need a better - // way to get this metadata in - if imagestream.Name == pool.Name+ctrlcommon.ImageStreamSuffixRenderedConfig { - ctrl.cheatMachineConfigLabelIntoBuildConfig(imagestream, pool) - } +// Machine Config Pools - ctrl.enqueueMachineConfigPool(pool) +func (ctrl *Controller) addMachineConfigPool(obj interface{}) { + pool := obj.(*mcfgv1.MachineConfigPool).DeepCopy() + klog.V(4).Infof("Adding MachineConfigPool %s", pool.Name) + ctrl.enqueueMachineConfigPool(pool) +} - } +// Prepares all of the objects needed to perform an image build. +func (ctrl *Controller) prepareMachineConfigForPool(ibr ImageBuildRequest) error { + mc, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigs().Get(context.TODO(), ibr.Pool.Spec.Configuration.Name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("could not get MachineConfig %s: %w", ibr.Pool.Spec.Configuration.Name, err) + } + mcConfigMap, err := ibr.toConfigMap(mc) + if err != nil { + return fmt.Errorf("could not convert MachineConfig %s into ConfigMap: %w", mc.Name, err) } -} -func (ctrl *Controller) deleteImageStream(obj interface{}) { - // TODO(jkyros): probably worth enqueueing the pool again here just so - // our sync can figure out that this newly-deleted stream is now empty and update the mappings ? 
-} + _, err = ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Create(context.TODO(), mcConfigMap, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("could not load rendered MachineConfig %s into configmap: %w", mcConfigMap.Name, err) + } -// Builds + klog.Infof("Stored MachineConfig %s in ConfigMap %s for build", mc.Name, mcConfigMap.Name) -func (ctrl *Controller) addBuild(obj interface{}) { - build := obj.(*buildv1.Build) + dockerfileConfigMap, err := ibr.dockerfileToConfigMap() + if err != nil { + return fmt.Errorf("could not generate Dockerfile ConfigMap: %w", err) + } - glog.Infof("Added a build: %s", build.Name) + _, err = ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Create(context.TODO(), dockerfileConfigMap, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("could not load rendered MachineConfig %s into configmap: %w", dockerfileConfigMap.Name, err) + } - // TODO(jkyros): Is this one of our builds that belongs to our imagestream? - // If it is, we should mark that somewhere so we know the pool is "building" + klog.Infof("Stored Dockerfile for build %s in ConfigMap %s for build", ibr.getBuildName(), dockerfileConfigMap.Name) + return nil } -func (ctrl *Controller) updateBuild(old, cur interface{}) { - build := old.(*buildv1.Build) - - glog.Infof("Updated a build: %s", build.Name) - // Builds will move through phases which cause them to change - // Most of those phases are standard/good, but some of them are bad - // We want to know if we end up in a bad phase and need to retry - ctrl.enqueuePoolIfBuildProblems(build) - -} +// Determines if we should run a build, then starts a build pod to perform the +// build, and updates the MachineConfigPool with an object reference for the +// build pod. +func (ctrl *Controller) startBuildForMachineConfigPool(pool *mcfgv1.MachineConfigPool) error { + osImageURLConfigMap, err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), machineConfigOSImageURLConfigMapName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("could not get OS image URL: %w", err) + } -func (ctrl *Controller) deleteBuild(obj interface{}) { - build := obj.(*buildv1.Build) + onClusterBuildConfigMap, err := ctrl.getOnClusterBuildConfig(pool) + if err != nil { + return fmt.Errorf("could not get configmap %q: %w", onClusterBuildConfigMapName, err) + } - glog.Infof("Deleted a build: %s", build.Name) + ibr := newImageBuildRequestWithConfigMap(pool, osImageURLConfigMap, onClusterBuildConfigMap) + return ctrl.handleImageBuildRequest(ibr) } -// Buildconfigs +// Gets the ConfigMap which specifies the name of the base image pull secret, final image pull secret, and final image pullspec. 
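getOnClusterBuildConfig, which follows, validates the three required keys and the secrets they name; it does not create the ConfigMap itself. For orientation, this is roughly the shape of ConfigMap it expects to find, written against the same key constants. The constant string values, the ConfigMap name, the secret names, and the pullspec below are placeholders, not values taken from this patch:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Placeholder key values: the real *ConfigKey constants are defined elsewhere
// in this patch series.
const (
	baseImagePullSecretNameConfigKey  = "baseImagePullSecretName"
	finalImagePushSecretNameConfigKey = "finalImagePushSecretName"
	finalImagePullspecConfigKey       = "finalImagePullspec"
)

func main() {
	// A ConfigMap shaped like the one the controller looks up in the MCO
	// namespace; every data value here is illustrative.
	cm := corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "on-cluster-build-config"},
		Data: map[string]string{
			baseImagePullSecretNameConfigKey:  "global-pull-secret-copy",
			finalImagePushSecretNameConfigKey: "final-image-push-secret",
			finalImagePullspecConfigKey:       "registry.example.com/my-org/os-image:latest",
		},
	}
	fmt.Printf("%+v\n", cm.Data)
}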
+func (ctrl *Controller) getOnClusterBuildConfig(pool *mcfgv1.MachineConfigPool) (*corev1.ConfigMap, error) { + onClusterBuildConfigMap, err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), onClusterBuildConfigMapName, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("could not get build controller config %q: %w", onClusterBuildConfigMapName, err) + } -func (ctrl *Controller) addBuildConfig(obj interface{}) { - buildconfig := obj.(*buildv1.BuildConfig) + requiredKeys := []string{ + baseImagePullSecretNameConfigKey, + finalImagePushSecretNameConfigKey, + finalImagePullspecConfigKey, + } - glog.Infof("Added a buildconfig: %s", buildconfig.Name) + needToUpdateConfigMap := false + finalImagePullspecWithTag := "" -} - -func (ctrl *Controller) updateBuildConfig(old, cur interface{}) { - buildconfig := old.(*buildv1.BuildConfig) - newbuildconfig := cur.(*buildv1.BuildConfig) - - glog.Infof("Updated a buildconfig: %s", buildconfig.Name) - - // Every time a buildconfig is instantiated it bumps the generation, so it always looks like it's changing - // For now we really only care if the user edited the dockerfile, and that string is a pointer - if buildconfig.Spec.Source.Dockerfile != nil && newbuildconfig.Spec.Source.Dockerfile != nil { - if *buildconfig.Spec.Source.Dockerfile != *newbuildconfig.Spec.Source.Dockerfile { - - glog.Infof("The dockerfile for buildconfig %s changed, triggering a build", buildconfig.Name) - // TODO(jkyros); If this is the mco content, we need the machineconfig name - // so go get the image from that imagestream and put the name in. Otherwise just start it. - - br := &buildv1.BuildRequest{ - //TypeMeta: metav1.TypeMeta{}, - ObjectMeta: metav1.ObjectMeta{Name: buildconfig.Name}, - //Env: []corev1.EnvVar{whichConfig}, - TriggeredBy: []buildv1.BuildTriggerCause{ - {Message: "The machine config controller"}, - }, - DockerStrategyOptions: &buildv1.DockerStrategyOptions{ - //BuildArgs: []corev1.EnvVar{whichConfig}, - //NoCache: new(bool), - }, - } + for _, key := range requiredKeys { + val, ok := onClusterBuildConfigMap.Data[key] + if !ok { + return nil, fmt.Errorf("missing required key %q in configmap %s", key, onClusterBuildConfigMapName) + } - _, err := ctrl.buildclient.BuildV1().BuildConfigs(ctrlcommon.MCONamespace).Instantiate(context.TODO(), br.Name, br, metav1.CreateOptions{}) + if key == baseImagePullSecretNameConfigKey || key == finalImagePushSecretNameConfigKey { + secret, err := ctrl.validatePullSecret(val) if err != nil { - glog.Errorf("Failed to trigger image build: %s", err) + return nil, err } - } - } - -} - -func (ctrl *Controller) deleteBuildConfig(obj interface{}) { - buildconfig := obj.(*buildv1.BuildConfig) - - glog.Infof("Deleted a buildconfig: %s", buildconfig.Name) -} - -// experimentalAddEntitlements grabs the cluster entitlement certificates out of the openshift-config-managed namespace and -// stuffs them into a machineconfig for our layered pool, so we can have entitled builds. This is a terrible practice, and -// we should probably just sync the secrets into our namespace so our builds can use them directly rather than expose them via machineconfig. 
-func (ctrl *Controller) experimentalAddEntitlements(pool *mcfgv1.MachineConfigPool) { - - var entitledConfigName = fmt.Sprintf("99-%s-entitled-build", pool.Name) - - // If it's not there, put it there, otherwise do nothing - _, err := ctrl.client.MachineconfigurationV1().MachineConfigs().Get(context.TODO(), entitledConfigName, metav1.GetOptions{}) - if errors.IsNotFound(err) { - - // Repo configuration for redhat package entitlements ( I just added base and appstream) - // TODO(jkyros): do this right once subscription-manager is included in RHCOS - redhatRepo := `[rhel-8-for-x86_64-baseos-rpms] -name = Red Hat Enterprise Linux 8 for x86_64 - BaseOS (RPMs) -baseurl = https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os -enabled = 1 -gpgcheck = 0 -sslverify = 0 -sslclientkey = /etc/pki/entitlement/entitlement-key.pem -sslclientcert = /etc/pki/entitlement/entitlement.pem -metadata_expire = 86400 -enabled_metadata = 1 - -[rhel-8-for-x86_64-appstream-rpms] -name = Red Hat Enterprise Linux 8 for x86_64 - AppStream (RPMs) -baseurl = https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/os -enabled = 1 -gpgcheck = 0 -sslverify = 0 -sslclientkey = /etc/pki/entitlement/entitlement-key.pem -sslclientcert = /etc/pki/entitlement/entitlement.pem -metadata_expire = 86400 -enabled_metadata = 1 -` - - // Make an ignition to stuff into our machineconfig - ignConfig := ctrlcommon.NewIgnConfig() - ignConfig.Storage.Files = append(ignConfig.Storage.Files, NewIgnFile("/etc/yum.repos.d/redhat.repo", redhatRepo)) - - // Get our entitlement secrets out of the managed namespace - entitlements, err := ctrl.kubeclient.CoreV1().Secrets("openshift-config-managed").Get(context.TODO(), "etc-pki-entitlement", metav1.GetOptions{}) - if err != nil { - glog.Warningf("Could not retrieve entitlement secret: %s", err) - return + if strings.Contains(secret.Name, "canonical") { + klog.Infof("Updating build controller config %s to indicate we have a canonicalized secret %s", onClusterBuildConfigMapName, secret.Name) + onClusterBuildConfigMap.Data[key] = secret.Name + needToUpdateConfigMap = true + } } - // Add the key to the file list - if key, ok := entitlements.Data["entitlement-key.pem"]; ok { - ignConfig.Storage.Files = append(ignConfig.Storage.Files, NewIgnFile("/etc/pki/entitlement/entitlement-key.pem", string(key))) - } + if key == finalImagePullspecConfigKey { + // Replace the user-supplied tag (if present) with the name of the + // rendered MachineConfig for uniqueness. This will also allow us to + // eventually do a pre-build registry query to determine if we need to + // perform a build. 
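In isolation, the tag substitution described above looks like the sketch below; the loop body that follows does the same thing with the ConfigMap value and the pool's rendered-config name. The sketch assumes the reference package in use is github.com/docker/distribution/reference, and the pullspec and rendered-config name are made up:

package main

import (
	"fmt"
	"log"

	"github.com/docker/distribution/reference"
)

func main() {
	// Hypothetical inputs: a user-supplied final image pullspec and the name
	// of the rendered MachineConfig the build is for.
	pullspec := "registry.example.com/my-org/os-image:latest"
	renderedConfig := "rendered-layered-0123456789abcdef"

	named, err := reference.ParseNamed(pullspec)
	if err != nil {
		log.Fatal(err)
	}

	// WithTag discards the user-supplied tag and applies the rendered-config
	// name, so every build gets a unique, predictable tag.
	tagged, err := reference.WithTag(named, renderedConfig)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(tagged.String())
	// registry.example.com/my-org/os-image:rendered-layered-0123456789abcdef
}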
+ named, err := reference.ParseNamed(val) + if err != nil { + return nil, fmt.Errorf("could not parse %s with %q: %w", finalImagePullspecConfigKey, val, err) + } - // Add the public key to the file list - if pub, ok := entitlements.Data["entitlement.pem"]; ok { - ignConfig.Storage.Files = append(ignConfig.Storage.Files, NewIgnFile("/etc/pki/entitlement/entitlement.pem", string(pub))) - } + tagged, err := reference.WithTag(named, pool.Spec.Configuration.Name) + if err != nil { + return nil, fmt.Errorf("could not add tag %s to image pullspec %s: %w", pool.Spec.Configuration.Name, val, err) + } - // Now it's a machineconfig - mc, err := ctrlcommon.MachineConfigFromIgnConfig(pool.Name, entitledConfigName, ignConfig) - if err != nil { - glog.Warningf("Could not create machineconfig for entitlements: %s", err) + finalImagePullspecWithTag = tagged.String() } + } - // Add it to the list for this pool - _, err = ctrl.client.MachineconfigurationV1().MachineConfigs().Create(context.TODO(), mc, metav1.CreateOptions{}) + // If we had to canonicalize a secret, that means the ConfigMap no longer + // points to the expected secret. So let's update the ConfigMap in the API + // server for the sake of consistency. + if needToUpdateConfigMap { + klog.Infof("Updating build controller config") + // TODO: Figure out why this causes failures with resourceVersions. + onClusterBuildConfigMap, err = ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Update(context.TODO(), onClusterBuildConfigMap, metav1.UpdateOptions{}) if err != nil { - glog.Warningf("Failed to add entitlements to layered pool: %s", err) + return nil, fmt.Errorf("could not update configmap %q: %w", onClusterBuildConfigMapName, err) } } -} + // We don't want to write this back to the API server since it's only useful + // this specific build. TODO: Migrate this to the ImageBuildRequest object so + // that it's generated on-demand instead. + onClusterBuildConfigMap.Data[finalImagePullspecConfigKey] = finalImagePullspecWithTag -// annotatePoolWithNewestImage looks in the corresponding image stream for a pool and annotates the name of the image, and it's -// corresponding rendered-config, which it retrieves from the image's docker metadata labels that we added during our build -func (ctrl *Controller) annotatePoolWithNewestImage(imageStream *imagev1.ImageStream, pool *mcfgv1.MachineConfigPool) error { + return onClusterBuildConfigMap, err +} - // We don't want to crash if these are empty - if pool.Annotations == nil { - pool.Annotations = map[string]string{} +// Ensure that the supplied pull secret exists, is in the correct format, etc. +func (ctrl *Controller) validatePullSecret(name string) (*corev1.Secret, error) { + secret, err := ctrl.kubeclient.CoreV1().Secrets(ctrlcommon.MCONamespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return nil, err } - // Grab the latest tag from the imagestream. 
If we don't have one, nothing happens - for _, tag := range imageStream.Status.Tags { - if len(tag.Items) == 0 { - continue - } + oldSecretName := secret.Name - // I might have an older image that has right machine config content, but some - // other content might have changed (like, I dunno, base image) so we shouldn't go back - // to older images - image := tag.Items[0] + secret, err = canonicalizePullSecret(secret) + if err != nil { + return nil, err + } - // If this is different than our current tag, grab it and annotate the pool - glog.Infof("imagestream %s newest is: %s (%s)", imageStream.Name, image.DockerImageReference, image.Image) - if pool.Spec.Configuration.Name == image.Image { - // We're already theer, don't touch it - return nil - } + // If a Docker pull secret lacks the top-level "auths" key, this means that + // it is a legacy-style pull secret. Buildah and Skopeo do not know how to + // correctly use one of these secrets. With that in mind, we "canonicalize" + // it, meaning we inject the existing legacy secret into a {"auths": {}} + // schema that Buildah and Skopeo can understand. We create a new K8s secret + // with this info and pass that secret into our image builder instead. + if strings.HasSuffix(secret.Name, canonicalSecretSuffix) { + klog.Infof("Found legacy-style secret %s, canonicalizing as %s", oldSecretName, secret.Name) + return ctrl.handleCanonicalizedPullSecret(secret) + } - // get the actual image so we can read its labels - fullImage, err := ctrl.imageclient.ImageV1().Images().Get(context.TODO(), image.Image, metav1.GetOptions{}) - if err != nil { - return fmt.Errorf("Could not retrieve image %s: %w", image.Image, err) - } + return secret, nil +} - // We need the labels out of the docker image but it's a raw extension - dockerLabels := struct { - Config struct { - Labels map[string]string `json:"Labels"` - } `json:"Config"` - }{} +// Attempt to create a canonicalized pull secret. If the secret already exsits, we should update it. +func (ctrl *Controller) handleCanonicalizedPullSecret(secret *corev1.Secret) (*corev1.Secret, error) { + out, err := ctrl.kubeclient.CoreV1().Secrets(ctrlcommon.MCONamespace).Get(context.TODO(), secret.Name, metav1.GetOptions{}) + if err != nil && !k8serrors.IsNotFound(err) { + return nil, fmt.Errorf("could not get canonical secret %q: %w", secret.Name, err) + } - // Get the labels out and see what config this is - err = json.Unmarshal(fullImage.DockerImageMetadata.Raw, &dockerLabels) + // We don't have a canonical secret, so lets create one. 
+ if k8serrors.IsNotFound(err) { + out, err = ctrl.kubeclient.CoreV1().Secrets(ctrlcommon.MCONamespace).Create(context.TODO(), secret, metav1.CreateOptions{}) if err != nil { - return fmt.Errorf("Could not get labels from docker image metadata: %w", err) + return nil, fmt.Errorf("could not create canonical secret %q: %w", secret.Name, err) } - // Tag what config this came from so we know it's the right image - if machineconfig, ok := dockerLabels.Config.Labels["machineconfig"]; ok { - pool.Annotations[ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey] = machineconfig - pool.Spec.Configuration.ObjectReference = corev1.ObjectReference{ - Kind: "Image", - Name: image.Image, - } - // TODO(jkyros): Kind of cheating using this as metadata showback for the user until we figure out our "level triggering" strategy - pool.Spec.Configuration.Source = []corev1.ObjectReference{ - // What machine config was the assigned image build using - {Kind: "MachineConfig", Name: machineconfig, Namespace: ctrlcommon.MCONamespace}, - // What imagestream did it come out of - {Kind: "ImageStream", Name: imageStream.Name, Namespace: ctrlcommon.MCONamespace}, - // The non-sha image reference just for convenience - {Kind: "DockerImageReference", Name: image.DockerImageReference, Namespace: ctrlcommon.MCONamespace}, - } - - } + klog.Infof("Created canonical secret %s", secret.Name) + return out, nil + } - // TODO(jkyros): Probably need to go through our eventing "state machine" to make sure our steps make sense - ctrl.eventRecorder.Event(pool, corev1.EventTypeNormal, "Updated", "Moved pool "+pool.Name+" to layered image "+image.DockerImageReference) + // Check if the canonical secret from the API server matches the one we have. + // If they match, then we don't need to do an update. + if bytes.Equal(secret.Data[corev1.DockerConfigJsonKey], out.Data[corev1.DockerConfigJsonKey]) { + klog.Infof("Canonical secret %q up-to-date", secret.Name) + return out, nil + } + // If we got here, it means that our secret needs to be updated. + out.Data = secret.Data + out, err = ctrl.kubeclient.CoreV1().Secrets(ctrlcommon.MCONamespace).Update(context.TODO(), out, metav1.UpdateOptions{}) + if err != nil { + return nil, fmt.Errorf("could not update canonical secret %q: %w", secret.Name, err) } - return nil + klog.Infof("Updated canonical secret %s", secret.Name) + + return out, nil } -func (ctrl *Controller) CreateBuildConfigForImageStream(pool *mcfgv1.MachineConfigPool, buildConfigName, sourceImageStreamName string, targetImageStream *imagev1.ImageStream, dockerFile string, triggerOnImageTags ...string) (*buildv1.BuildConfig, error) { - // Construct a buildconfig for this pool if it doesn't exist +// Starts a build for a given Image Build Request. 
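The canonicalization that validatePullSecret and handleCanonicalizedPullSecret depend on lives in canonicalizePullSecret, which is not part of this hunk, but the transformation it has to perform is well defined: wrap a legacy kubernetes.io/dockercfg payload in the {"auths": {...}} envelope that Buildah and Skopeo understand. A rough sketch of that wrapping step; the secret name and the "-canonical" suffix are placeholders for whatever canonicalSecretSuffix actually is:

package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// canonicalizeDockercfg (sketch): a legacy .dockercfg payload is
// {"<registry>": {"auth": ...}}, while .dockerconfigjson consumers expect
// {"auths": {"<registry>": {"auth": ...}}}.
func canonicalizeDockercfg(legacy []byte) ([]byte, error) {
	var entries map[string]interface{}
	if err := json.Unmarshal(legacy, &entries); err != nil {
		return nil, fmt.Errorf("could not parse legacy pull secret: %w", err)
	}
	return json.Marshal(map[string]interface{}{"auths": entries})
}

func main() {
	legacy := []byte(`{"registry.example.com": {"auth": "dXNlcjpwYXNz"}}`)

	canonical, err := canonicalizeDockercfg(legacy)
	if err != nil {
		panic(err)
	}

	// The controller stores the result under corev1.DockerConfigJsonKey in a
	// new secret whose name carries the canonical suffix.
	secret := corev1.Secret{
		Type: corev1.SecretTypeDockerConfigJson,
		Data: map[string][]byte{corev1.DockerConfigJsonKey: canonical},
	}
	secret.Name = "my-pull-secret-canonical" // placeholder name

	fmt.Println(string(secret.Data[corev1.DockerConfigJsonKey]))
}

handleImageBuildRequest, the build entry point named just above, follows.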
+func (ctrl *Controller) handleImageBuildRequest(ibr ImageBuildRequest) error { + err := ctrl.prepareMachineConfigForPool(ibr) + if err != nil { + return fmt.Errorf("could not start build for MachineConfigPool %s: %w", ibr.Pool.Name, err) + } - skipLayers := buildv1.ImageOptimizationSkipLayers - buildConfig := &buildv1.BuildConfig{ + objRef, err := ctrl.imageBuilder.StartBuild(ibr) - TypeMeta: metav1.TypeMeta{}, - ObjectMeta: metav1.ObjectMeta{ - Name: buildConfigName, - Namespace: ctrlcommon.MCONamespace, - Annotations: map[string]string{ - "machineconfiguration.openshift.io/pool": pool.Name, - }, - }, - Spec: buildv1.BuildConfigSpec{ - RunPolicy: "Serial", - // Simple dockerfile build, just the text from the dockerfile - CommonSpec: buildv1.CommonSpec{ - Source: buildv1.BuildSource{ - Type: "Dockerfile", - Dockerfile: &dockerFile, - }, - Strategy: buildv1.BuildStrategy{ - DockerStrategy: &buildv1.DockerBuildStrategy{ - // This will override the last FROM in our builds, but we want that - From: &corev1.ObjectReference{ - Kind: "ImageStreamTag", - Name: sourceImageStreamName + ":latest", - }, - // Squashing layers is good as long as it doesn't cause problems with what - // the users want to do. It says "some syntax is not supported" - ImageOptimizationPolicy: &skipLayers, - }, - Type: "Docker", - }, - // Output to the imagestreams we made before - Output: buildv1.BuildOutput{ - To: &corev1.ObjectReference{ - Kind: "ImageStreamTag", - Name: targetImageStream.Name + ":latest", - }, - ImageLabels: []buildv1.ImageLabel{ - // The pool that this image was built for - {Name: "io.openshift.machineconfig.pool", Value: pool.Name}, - }, - // TODO(jkyros): I want to label these images with which rendered config they were built from - // but there doesn't seem to be a way to get it in there easily - }, - }, - - Triggers: []buildv1.BuildTriggerPolicy{ - { - // This blank one signifies "just trigger on the from image specified in the strategy" - Type: "ImageChange", - ImageChange: &buildv1.ImageChangeTrigger{}, - }, - }, - }, + if err != nil { + return err } - // Pause the custom build config by default so it doesn't build automatically unless we enable it - if buildConfigName == pool.Name+"-build"+ctrlcommon.ImageStreamSuffixMCOContentCustom { - buildConfig.Spec.Triggers[0].ImageChange.Paused = true + return ctrl.markBuildPendingWithObjectRef(ibr.Pool, *objRef) +} + +// If one wants to opt out, this removes all of the statuses and object +// references from a given MachineConfigPool. 
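handleImageBuildRequest is the only place this controller asks ctrl.imageBuilder to start a build, but the rest of the file also calls Run, DeleteBuildObject, and FinalPullspec on it. Collecting those call sites suggests an abstraction roughly like the sketch below; the interface name and exact signatures are inferred from usage, not copied from this patch, and the ImageBuildRequest stub exists only so the sketch stands alone:

package main

import (
	"context"

	mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
	corev1 "k8s.io/api/core/v1"
)

// ImageBuilder is an inferred sketch of what the controller programs against:
// one implementation drives OpenShift Build objects, the other a custom build
// pod.
type ImageBuilder interface {
	// Run starts the builder's own informers and workers.
	Run(ctx context.Context, workers int)
	// StartBuild kicks off a build and returns a reference to the Build or
	// pod it created, which the controller stores on the pool.
	StartBuild(ibr ImageBuildRequest) (*corev1.ObjectReference, error)
	// DeleteBuildObject removes the Build or pod for the pool.
	DeleteBuildObject(pool *mcfgv1.MachineConfigPool) error
	// FinalPullspec resolves the pullspec of the finished image.
	FinalPullspec(pool *mcfgv1.MachineConfigPool) (string, error)
}

// ImageBuildRequest is defined elsewhere in this patch series; this stub only
// makes the sketch self-contained.
type ImageBuildRequest struct {
	Pool *mcfgv1.MachineConfigPool
}

func main() {}

finalizeOptOut, the opt-out cleanup described just above, comes next.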
+func (ctrl *Controller) finalizeOptOut(pool *mcfgv1.MachineConfigPool) error { + if err := ctrl.postBuildCleanup(pool, true); err != nil { + return err } - // TODO(jkyros): pull this out if we handle these triggers ourselves, because we might need the control - // If additional triggers, add them to the config + deleteBuildRefFromMachineConfigPool(pool) - for _, tag := range triggerOnImageTags { - buildConfig.Spec.Triggers = append(buildConfig.Spec.Triggers, buildv1.BuildTriggerPolicy{ - Type: "ImageChange", - ImageChange: &buildv1.ImageChangeTrigger{ - LastTriggeredImageID: "", - From: &corev1.ObjectReference{ - Kind: "ImageStreamTag", - Name: tag, - }, - }, - }) + delete(pool.Annotations, ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey) - } + conditions := []mcfgv1.MachineConfigPoolCondition{} - // Set the owner references so these get cleaned up if the pool gets deleted - poolKind := mcfgv1.SchemeGroupVersion.WithKind("MachineConfigPool") - oref := metav1.NewControllerRef(pool, poolKind) - buildConfig.SetOwnerReferences([]metav1.OwnerReference{*oref}) + for _, condition := range pool.Status.Conditions { + buildConditionFound := false + for _, buildConditionType := range getMachineConfigPoolBuildConditions() { + if condition.Type == buildConditionType { + buildConditionFound = true + break + } + } - // Create the buildconfig - return ctrl.buildclient.BuildV1().BuildConfigs(ctrlcommon.MCONamespace).Create(context.TODO(), buildConfig, metav1.CreateOptions{}) + if !buildConditionFound { + conditions = append(conditions, condition) + } + } + pool.Status.Conditions = conditions + return ctrl.updatePoolAndSyncStatus(pool, ctrl.syncAvailableStatus) } -// TODO(jkyros): don't leave this here, expose it properly if you're gonna use it -// StrToPtr returns a pointer to a string -func StrToPtr(s string) *string { - return &s -} +// Fires whenever a MachineConfigPool is updated. +func (ctrl *Controller) updateMachineConfigPool(old, cur interface{}) { + oldPool := old.(*mcfgv1.MachineConfigPool).DeepCopy() + curPool := cur.(*mcfgv1.MachineConfigPool).DeepCopy() -// TODO(jkyros): don't leave this here, expose it properly if you're gonna use it -// NewIgnFile returns a simple ignition3 file from just path and file contents -func NewIgnFile(path, contents string) ign3types.File { - return ign3types.File{ - Node: ign3types.Node{ - Path: path, - }, - FileEmbedded1: ign3types.FileEmbedded1{ - Contents: ign3types.Resource{ - Source: StrToPtr(dataurl.EncodeBytes([]byte(contents)))}, - }, - } -} + klog.V(4).Infof("Updating MachineConfigPool %s", oldPool.Name) -// TODO(jkyros): some quick functions to go with our image stream informer so we can watch imagestream update -func (ctrl *Controller) resolveControllerRef(controllerRef *metav1.OwnerReference) *mcfgv1.MachineConfigPool { - // We can't look up by UID, so look up by Name and then verify UID. - // Don't even try to look up by Name if it's the wrong Kind. - if controllerRef.Kind != controllerKind.Kind { - return nil - } - pool, err := ctrl.mcpLister.Get(controllerRef.Name) + doABuild, err := shouldWeDoABuild(ctrl.imageBuilder, oldPool, curPool) if err != nil { - return nil + klog.Errorln(err) + ctrl.handleErr(err, curPool.Name) + return } - if pool.UID != controllerRef.UID { - // The controller we found with this Name is not the same one that the - // ControllerRef points to. - return nil + switch { + // We've transitioned from a layered pool to a non-layered pool. 
+ case ctrlcommon.IsLayeredPool(oldPool) && !ctrlcommon.IsLayeredPool(curPool): + klog.V(4).Infof("MachineConfigPool %s has opted out of layering", curPool.Name) + if err := ctrl.finalizeOptOut(curPool); err != nil { + klog.Errorln(err) + ctrl.handleErr(err, curPool.Name) + return + } + // We need to do a build. + case doABuild: + klog.V(4).Infof("MachineConfigPool %s has changed, requiring a build", curPool.Name) + if err := ctrl.startBuildForMachineConfigPool(curPool); err != nil { + klog.Errorln(err) + ctrl.handleErr(err, curPool.Name) + return + } + // Everything else. + default: + klog.V(4).Infof("MachineConfigPool %s up-to-date", curPool.Name) } - return pool + + ctrl.enqueueMachineConfigPool(curPool) } -//nolint:unparam -func (ctrl *Controller) getMostRecentImageTagForImageStream(poolImageStream *imagev1.ImageStream, desiredTag string) *imagev1.TagEvent { - // Get the most recent image - for _, tag := range poolImageStream.Status.Tags { - if tag.Tag == desiredTag { - // TODO(jkyros): don't crash if this is empty - if len(tag.Items) > 0 { - return &tag.Items[0] - } +// Fires whenever a MachineConfigPool is deleted. TODO: Wire up checks for +// deleting any in-progress builds. +func (ctrl *Controller) deleteMachineConfigPool(obj interface{}) { + pool, ok := obj.(*mcfgv1.MachineConfigPool) + if !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj)) + return + } + pool, ok = tombstone.Obj.(*mcfgv1.MachineConfigPool) + if !ok { + utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a MachineConfigPool %#v", obj)) + return } } - return nil + klog.V(4).Infof("Deleting MachineConfigPool %s", pool.Name) } func (ctrl *Controller) syncAvailableStatus(pool *mcfgv1.MachineConfigPool) error { - if mcfgv1.IsMachineConfigPoolConditionFalse(pool.Status.Conditions, mcfgv1.MachineConfigPoolRenderDegraded) { - return nil - } + // I'm not sure what the consequences are of not doing this. + //nolint:gocritic // Leaving this here for review purposes. 
+ /* + if mcfgv1.IsMachineConfigPoolConditionFalse(pool.Status.Conditions, mcfgv1.MachineConfigPoolRenderDegraded) { + return nil + } + */ sdegraded := mcfgv1.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolRenderDegraded, corev1.ConditionFalse, "", "") mcfgv1.SetMachineConfigPoolCondition(&pool.Status, *sdegraded) - if _, err := ctrl.client.MachineconfigurationV1().MachineConfigPools().UpdateStatus(context.TODO(), pool, metav1.UpdateOptions{}); err != nil { + + if _, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().UpdateStatus(context.TODO(), pool, metav1.UpdateOptions{}); err != nil { return err } + return nil } func (ctrl *Controller) syncFailingStatus(pool *mcfgv1.MachineConfigPool, err error) error { - sdegraded := mcfgv1.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolRenderDegraded, corev1.ConditionTrue, "", fmt.Sprintf("Failed to render configuration for pool %s: %v", pool.Name, err)) + sdegraded := mcfgv1.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolRenderDegraded, corev1.ConditionTrue, "", fmt.Sprintf("Failed to build configuration for pool %s: %v", pool.Name, err)) mcfgv1.SetMachineConfigPoolCondition(&pool.Status, *sdegraded) - if _, updateErr := ctrl.client.MachineconfigurationV1().MachineConfigPools().UpdateStatus(context.TODO(), pool, metav1.UpdateOptions{}); updateErr != nil { - glog.Errorf("Error updating MachineConfigPool %s: %v", pool.Name, updateErr) + if _, updateErr := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().UpdateStatus(context.TODO(), pool, metav1.UpdateOptions{}); updateErr != nil { + klog.Errorf("Error updating MachineConfigPool %s: %v", pool.Name, updateErr) } return err } -func (ctrl *Controller) updateBuildConfigWithLabels(buildConfig *buildv1.BuildConfig, labels map[string]string) (*buildv1.BuildConfig, error) { - - newBuildConfig := buildConfig.DeepCopy() - for labelKey, labelValue := range labels { - il := buildv1.ImageLabel{Name: labelKey, Value: labelValue} - newBuildConfig.Spec.Output.ImageLabels = append(newBuildConfig.Spec.Output.ImageLabels, il) +// Searches a MachineConfigPoolStatusConfiguration for a given object reference. +func machineConfigPoolObjectRefSearchFunc(cfg mcfgv1.MachineConfigPoolStatusConfiguration, objRef corev1.ObjectReference) bool { + for _, src := range cfg.Source { + if src == objRef { + return true + } } - return ctrl.buildclient.BuildV1().BuildConfigs(ctrlcommon.MCONamespace).Update(context.TODO(), newBuildConfig, metav1.UpdateOptions{}) + return false } -// ensureCoreOSImageStream creates the base CoreOS imagestream that is owned by no pool and serves as the default source of the -// base images for the layered pools' base image streams -func (ctrl *Controller) ensureCoreOSImageStream() (*imagev1.ImageStream, error) { +// Determines if a MachineConfigPool contains a given ObjectReference. +func machineConfigPoolHasObjectRef(pool *mcfgv1.MachineConfigPool, objRef corev1.ObjectReference) bool { + return machineConfigPoolObjectRefSearchFunc(pool.Spec.Configuration, objRef) && + machineConfigPoolObjectRefSearchFunc(pool.Status.Configuration, objRef) +} - checkImageStream, err := ctrl.isLister.ImageStreams(ctrlcommon.MCONamespace).Get(ctrlcommon.CoreOSImageStreamName) - if apierrors.IsNotFound(err) { - controllerConfig, err := ctrl.ccLister.Get(ctrlcommon.ControllerConfigName) - if err != nil { - return nil, fmt.Errorf("could not get ControllerConfig %w", err) - } +// Determines if a MachineConfigPool contains a reference to a Build or custom build pod. 
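All of this bookkeeping works on plain corev1.ObjectReference entries stored in the pool's Spec.Configuration.Source and Status.Configuration.Source; machineConfigPoolHasBuildRef below narrows the same search to the name of the Build or build pod. The toObjectRef helper used by the updaters earlier in the file is not shown in this hunk; for a pod it presumably produces something like this (the exact fields chosen are an assumption):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// podObjectRef (sketch): just enough identity for the pool to point back at
// the in-flight build pod.
func podObjectRef(pod *corev1.Pod) *corev1.ObjectReference {
	return &corev1.ObjectReference{
		Kind:      "Pod",
		Name:      pod.Name,
		Namespace: pod.Namespace,
		UID:       pod.UID,
	}
}

func main() {
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "build-layered-rendered-config", // placeholder name
			Namespace: "openshift-machine-config-operator",
		},
	}
	fmt.Printf("%+v\n", *podObjectRef(pod))
}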
+func machineConfigPoolHasBuildRef(pool *mcfgv1.MachineConfigPool) bool { + buildName := newImageBuildRequest(pool).getBuildName() - newImageStream := &imagev1.ImageStream{ - ObjectMeta: metav1.ObjectMeta{ - Name: ctrlcommon.CoreOSImageStreamName, - Namespace: ctrlcommon.MCONamespace, - }, - Spec: imagev1.ImageStreamSpec{ - LookupPolicy: imagev1.ImageLookupPolicy{Local: false}, - DockerImageRepository: "", - Tags: []imagev1.TagReference{ - { - Name: "latest", - From: &corev1.ObjectReference{ - Kind: "DockerImage", - Name: controllerConfig.Spec.OSImageURL, - }, - }, - }, - }, - } - checkImageStream, err = ctrl.imageclient.ImageV1().ImageStreams(ctrlcommon.MCONamespace).Create(context.TODO(), newImageStream, metav1.CreateOptions{}) - if err != nil { - return nil, fmt.Errorf("Attempted to create ImageStream %s but failed: %w", newImageStream, err) + searchFunc := func(cfg mcfgv1.MachineConfigPoolStatusConfiguration) bool { + for _, src := range cfg.Source { + if src.Name == buildName && src.Kind != "MachineConfig" { + return true + } } - glog.Infof("Created image stream %s", ctrlcommon.CoreOSImageStreamName) - } else if err != nil { - return nil, err - } - return checkImageStream, nil + return false + } + return searchFunc(pool.Spec.Configuration) && searchFunc(pool.Status.Configuration) } -func (ctrl *Controller) ensureImageStreamForPool(pool *mcfgv1.MachineConfigPool, imageStreamName string, pbr *PoolResourceNames) (*imagev1.ImageStream, error) { - // Check to see if we have the imagestream already - checkImageStream, err := ctrl.isLister.ImageStreams(ctrlcommon.MCONamespace).Get(imageStreamName) - if apierrors.IsNotFound(err) { - // Create the imagestream if it doesn't already exist - // It doesn't exist, so we need to make it, otherwise our builds will fail - newImageStream := &imagev1.ImageStream{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - "machineconfiguration.openshift.io/pool": pool.Name, - }, - }, - } - newImageStream.Name = imageStreamName - newImageStream.Namespace = ctrlcommon.MCONamespace - newImageStream.Spec.LookupPolicy.Local = false - - // Set ownerships so these get cleaned up if we delete the pool - // TODO(jkyros): I have no idea if this actually cleans the images out of the stream if we delete it? - poolKind := mcfgv1.SchemeGroupVersion.WithKind("MachineConfigPool") - oref := metav1.NewControllerRef(pool, poolKind) - newImageStream.SetOwnerReferences([]metav1.OwnerReference{*oref}) - - // coreos imagestream is base, it's special, it needs to pull that image - if imageStreamName == pbr.ImageStream.Base { - - newImageStream.Spec = imagev1.ImageStreamSpec{ - LookupPolicy: imagev1.ImageLookupPolicy{Local: false}, - DockerImageRepository: "", - Tags: []imagev1.TagReference{ - { - Name: "latest", - From: &corev1.ObjectReference{ - Kind: "DockerImage", - Name: "image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/coreos", - }, - }, - }, - } +// Deletes the build pod references from the MachineConfigPool. 
+func deleteBuildRefFromMachineConfigPool(pool *mcfgv1.MachineConfigPool) { + buildPodName := newImageBuildRequest(pool).getBuildName() - } + deleteFunc := func(cfg mcfgv1.MachineConfigPoolStatusConfiguration) []corev1.ObjectReference { + configSources := []corev1.ObjectReference{} - // TODO(jkyros): your data structure for this is clearly inelegant, fix it - if imageStreamName == pbr.ImageStream.Base || imageStreamName == pbr.ImageStream.RenderedConfig { - newImageStream.Annotations["machineconfig.openshift.io/buildconfig"] = pbr.BuildConfig.Content.Name - } - if imageStreamName == pbr.ImageStream.Content { - newImageStream.Annotations["machineconfig.openshift.io/buildconfig"] = pbr.BuildConfig.CustomContent.Name + for _, src := range cfg.Source { + if src.Name != buildPodName { + configSources = append(configSources, src) + } } - // It didn't exist, put the imagestream in the cluster - checkImageStream, err = ctrl.imageclient.ImageV1().ImageStreams(ctrlcommon.MCONamespace).Create(context.TODO(), newImageStream, metav1.CreateOptions{}) - if err != nil { - return nil, fmt.Errorf("Attempted to create ImageStream %s but failed: %w", newImageStream, err) - } - glog.Infof("Created image stream %s", imageStreamName) + return configSources } - return checkImageStream, nil -} -func (ctrl *Controller) ensureBuildConfigForPool(pool *mcfgv1.MachineConfigPool, pbc *PoolBuildConfig) (*buildv1.BuildConfig, error) { - checkBuildConfig, err := ctrl.bcLister.BuildConfigs(ctrlcommon.MCONamespace).Get(pbc.Name) - if apierrors.IsNotFound(err) { + pool.Spec.Configuration.Source = deleteFunc(pool.Spec.Configuration) + pool.Status.Configuration.Source = deleteFunc(pool.Status.Configuration) +} - // We are making this buildconfig owned by the imagestream it's building to - // TODO(jkyros): I really do feel like the buildconfig belongs to the stream because it populates the stream - ownerStream, err := ctrl.isLister.ImageStreams(ctrlcommon.MCONamespace).Get(pbc.Target) - if err != nil { - return nil, fmt.Errorf("Failed to retrieve owner imagestream: %w", err) - } - // Make the build since it doesn't exist, and set checkBuildConfig so we can use it below - checkBuildConfig, err = ctrl.CreateBuildConfigForImageStream(pool, pbc.Name, pbc.Source, ownerStream, pbc.DockerfileContent, pbc.TriggeredByStreams...) - if err != nil { - return nil, err - } - glog.Infof("BuildConfig %s has been created for pool %s", pbc.Name, pool.Name) - } else if err != nil { - // some other error happened - return nil, err - } - return checkBuildConfig, nil +// Determines if two conditions are equal. Note: I purposely do not include the +// timestamp in the equality test, since we do not directly set it. +func isConditionEqual(cond1, cond2 mcfgv1.MachineConfigPoolCondition) bool { + return cond1.Type == cond2.Type && + cond1.Status == cond2.Status && + cond1.Message == cond2.Message && + cond1.Reason == cond2.Reason } -func (ctrl *Controller) enqueuePoolIfBuildProblems(build *buildv1.Build) { - // If it's in a good state, save the suffering and move on - if isGoodBuildPhase(build.Status.Phase) { - return +// Idempotently adds an ObjectRefence to a pool. 
+func addObjectRefIfMissing(pool *mcfgv1.MachineConfigPool, objRef corev1.ObjectReference) { + if !machineConfigPoolHasObjectRef(pool, objRef) { + pool.Spec.Configuration.Source = append(pool.Spec.Configuration.Source, objRef) + pool.Status.Configuration.Source = append(pool.Status.Configuration.Source, objRef) } +} - // TODO(jkyros): sequester this in a function somewhere - - // If it's in a bad phase, our pool might care if it's one of ours - - // See who owns the build - controllerRef := metav1.GetControllerOf(build) - - // If the build is owned by a buildconfig, see if it's one of ours - if controllerRef.Kind == "BuildConfig" { - buildConfig, err := ctrl.bcLister.BuildConfigs(ctrlcommon.MCONamespace).Get(controllerRef.Name) - if err != nil { - glog.Errorf("Failed to retrieve controlling buildconfig %s for build %s: %s", controllerRef.Name, build.Name, err) - } - - // See if the buildconfig is controlled by our pool - buildConfigControllerRef := metav1.GetControllerOf(buildConfig) - if controllerRef != nil { - pool := ctrl.resolveControllerRef(buildConfigControllerRef) - // If it is our pool, then enqueue it - if pool != nil { - ctrl.enqueueMachineConfigPool(pool) - } - +// Idempotently sets MCP build conditions on a given MachineConfigPool. +func setMCPBuildConditions(pool *mcfgv1.MachineConfigPool, conditions []mcfgv1.MachineConfigPoolCondition) { + for _, condition := range conditions { + condition := condition + currentCondition := mcfgv1.GetMachineConfigPoolCondition(pool.Status, condition.Type) + if currentCondition != nil && isConditionEqual(*currentCondition, condition) { + continue } + mcpCondition := mcfgv1.NewMachineConfigPoolCondition(condition.Type, condition.Status, condition.Reason, condition.Message) + mcfgv1.SetMachineConfigPoolCondition(&pool.Status, *mcpCondition) } } -// isGoodBuildPhase determines whether a build is okay, or if it had a problem that we potentially need to take action on. This is used to decide -// whether or not re-queue a machineconfig pool to check on its builds if the build came from one of its build controllers. -func isGoodBuildPhase(buildPhase buildv1.BuildPhase) bool { - - if buildPhase != buildv1.BuildPhaseFailed && buildPhase != buildv1.BuildPhaseCancelled && buildPhase != buildv1.BuildPhaseError { - return true - } - return false +// Determine if we have a config change. +func isPoolConfigChange(oldPool, curPool *mcfgv1.MachineConfigPool) bool { + return oldPool.Spec.Configuration.Name != curPool.Spec.Configuration.Name } -func (ctrl *Controller) getLabelsForImageRef(imageRef string) (map[string]string, error) { - fullImage, err := ctrl.imageclient.ImageV1().Images().Get(context.TODO(), imageRef, metav1.GetOptions{}) - if err != nil { - return nil, fmt.Errorf("Could not retrieve image %s: %w", imageRef, err) - } - - // We need the labels out of the docker image but it's a raw extension - dockerLabels := struct { - Config struct { - Labels map[string]string `json:"Labels"` - } `json:"Config"` - }{} - - // Get the labels out and see what config this is - err = json.Unmarshal(fullImage.DockerImageMetadata.Raw, &dockerLabels) - if err != nil { - return nil, fmt.Errorf("Could not get labels from docker image metadata: %w", err) - } - return dockerLabels.Config.Labels, nil +// Determine if we have an image pullspec label. 
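setMCPBuildConditions above is what lets the mark* helpers run repeatedly without churning the pool status: a condition that already matches on type, status, reason, and message is skipped rather than rewritten. A sketch of a unit test for that property, written as if it lived alongside this file (the test name and fixture are illustrative):

package build

import (
	"testing"

	mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
	corev1 "k8s.io/api/core/v1"
)

// TestSetMCPBuildConditionsIsIdempotent applies the same condition block twice
// and expects no duplicate entries and no rewrites of matching conditions.
func TestSetMCPBuildConditionsIsIdempotent(t *testing.T) {
	pool := &mcfgv1.MachineConfigPool{}

	building := []mcfgv1.MachineConfigPoolCondition{
		{Type: mcfgv1.MachineConfigPoolBuilding, Status: corev1.ConditionTrue, Reason: "BuildRunning"},
		{Type: mcfgv1.MachineConfigPoolBuildPending, Status: corev1.ConditionFalse},
	}

	setMCPBuildConditions(pool, building)
	want := len(pool.Status.Conditions)

	setMCPBuildConditions(pool, building)

	if got := len(pool.Status.Conditions); got != want {
		t.Fatalf("expected %d conditions after second apply, got %d", want, got)
	}
}

hasImagePullspecAnnotation and the build gating predicates that consume these conditions follow.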
+func hasImagePullspecAnnotation(pool *mcfgv1.MachineConfigPool) bool { + imagePullspecAnnotation, ok := pool.Annotations[ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey] + return imagePullspecAnnotation != "" && ok } -// ensureImageStreamPrecedenceIfPopulated tries to make the UX cleaner by automatically switching the pool to use custom/external imagestreams -// if it looks like the user has populated them. It will switch back if those imagestreams get cleared out. This is really just to save the user from -// having to update the pool annotations themselves. -func (ctrl *Controller) ensureImageStreamPrecedenceIfPopulated(pool *mcfgv1.MachineConfigPool) error { - glog.Infof("Ensuring imagestreams are populated for %s", pool.Name) - // Get the list of what resources should exist for this pool - pbr := PoolBuildResources(pool) - - // Get the imagestream object for the external base imagestream - coreosImageStream, err := ctrl.isLister.ImageStreams(ctrlcommon.MCONamespace).Get(ctrlcommon.CoreOSImageStreamName) - if err != nil { - return err +// Checks our pool to see if we can do a build. We base this off of a few criteria: +// 1. Is the pool opted into layering? +// 2. Do we have an object reference to an in-progress build? +// 3. Is the pool degraded? +// 4. Is our build in a specific state? +// +// Returns true if we are able to build. +func canPoolBuild(pool *mcfgv1.MachineConfigPool) bool { + // If we don't have a layered pool, we should not build. + if !ctrlcommon.IsLayeredPool(pool) { + return false } - // Get the imagestream object for the external base imagestream - baseImageStream, err := ctrl.isLister.ImageStreams(ctrlcommon.MCONamespace).Get(pbr.ImageStream.Base) - if err != nil { - return err + // If we have a reference to an in-progress build, we should not build. + if machineConfigPoolHasBuildRef(pool) { + return false } - // Get the imagestream object for the external base imagestream - externalBaseImageStream, err := ctrl.isLister.ImageStreams(ctrlcommon.MCONamespace).Get(pbr.ImageStream.ExternalBase) - if err != nil { - return err + // If the pool is degraded, we should not build. + if isPoolDegraded(pool) { + return false } - // Get the imagestream object for the external imagestream - externalImageStream, err := ctrl.isLister.ImageStreams(ctrlcommon.MCONamespace).Get(pbr.ImageStream.External) - if err != nil { - return err + // If the pool is in any of these states, we should not build. + conditionTypes := []mcfgv1.MachineConfigPoolConditionType{ + mcfgv1.MachineConfigPoolBuilding, + mcfgv1.MachineConfigPoolBuildPending, + mcfgv1.MachineConfigPoolBuildFailed, } - // Get the imagestream objects for the custom imagestream, too - customImageStream, err := ctrl.isLister.ImageStreams(ctrlcommon.MCONamespace).Get(pbr.ImageStream.CustomContent) - if err != nil { - return err + for _, conditionType := range conditionTypes { + if mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, conditionType) { + return false + } } - // Retrieve the name of the imagestream we're currently using - poolImageStreamName, _ := ctrlcommon.GetPoolImageStream(pool) + return true +} - // This is the place where we set the pool image stream if it's not set, so it's not an error here - if poolImageStreamName == "" { - ctrlcommon.SetPoolImageStream(pool, pbr.ImageStream.Content) +// Determines if a pool is in a degraded state. Returns true if the pool is in +// any kind of degraded state. 
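+// "Any kind" currently means the Degraded, RenderDegraded, and NodeDegraded
+// condition types; a pool with any one of them set to true is treated as
+// degraded, which in turn blocks builds via canPoolBuild.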
+func isPoolDegraded(pool *mcfgv1.MachineConfigPool) bool { + degradedConditionTypes := []mcfgv1.MachineConfigPoolConditionType{ + mcfgv1.MachineConfigPoolDegraded, + mcfgv1.MachineConfigPoolRenderDegraded, + mcfgv1.MachineConfigPoolNodeDegraded, } - // Get the latest tag from external base - latestExternalBaseImageTag := ctrl.getMostRecentImageTagForImageStream(externalBaseImageStream, "latest") - latestBaseImageTag := ctrl.getMostRecentImageTagForImageStream(baseImageStream, "latest") - latestCoreOSImageTag := ctrl.getMostRecentImageTagForImageStream(coreosImageStream, "latest") - - // If there is something in external base, we need to tag it into our base - if latestExternalBaseImageTag != nil { - - if latestBaseImageTag == nil || latestBaseImageTag.Image != latestExternalBaseImageTag.Image { - if latestBaseImageTag != nil { - glog.Infof("Latest base: %s Latest external: %s", latestBaseImageTag.Image, latestExternalBaseImageTag.Image) - } else { - glog.Infof("Latest base image tag was empty, assigning external") - } - err := ctrl.tagImageIntoStream(externalBaseImageStream.Name, baseImageStream.Name, latestExternalBaseImageTag.Image, "latest") - if err != nil { - return err - } - } - } else { - // If there is nothing in external base, we should use coreos as our base - if latestBaseImageTag == nil { - if latestCoreOSImageTag == nil { - return fmt.Errorf("we don't have a CoreOS image yet -- probably still downloading, need to wait") - } - } else { - glog.Infof("Latest base: %s Latest coreos: %s", latestBaseImageTag.Image, latestCoreOSImageTag.Image) - - // If what we have is different than what coreos has, we should use what coreos has instead - if latestBaseImageTag.Image != latestCoreOSImageTag.Image { - err := ctrl.tagImageIntoStream(coreosImageStream.Name, baseImageStream.Name, latestCoreOSImageTag.Image, "latest") - if err != nil { - return err - } - } + for _, conditionType := range degradedConditionTypes { + if mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, conditionType) { + return true } - } - // If we aren't using the external image stream, and it is populated, we should switch to it - if poolImageStreamName != externalImageStream.Name && ctrl.getMostRecentImageTagForImageStream(externalImageStream, "latest") != nil { - // TODO(jkyros): Technically I event here before the update happens down below later, that seems dishonest for the user since - // at this point what I say happened hasn't happened yet - ctrl.eventRecorder.Event(pool, corev1.EventTypeNormal, "ImageStreamChange", "Image stream for pool "+pool.Name+" changed to "+externalImageStream.Name+ - " because it takes precedence and an image is present in it") - ctrlcommon.SetPoolImageStream(pool, pbr.ImageStream.External) - - // External isn't populated, see if we shuold fall back to custom if it has an image or an updated buildconfig - } else if poolImageStreamName != customImageStream.Name && ctrl.getMostRecentImageTagForImageStream(customImageStream, "latest") != nil { - ctrl.eventRecorder.Event(pool, corev1.EventTypeNormal, "ImageStreamChange", "Image stream for pool "+pool.Name+" changed to "+customImageStream.Name+ - " because it takes precedence and an image is present in it") - ctrlcommon.SetPoolImageStream(pool, pbr.ImageStream.CustomContent) - - } else if poolImageStreamName != pbr.ImageStream.Content { - // If we didn't catch one of the previous if blocks, we should be using the default MCO content stream. 
This lets us fall back - // if/when someone cleans out or deletes one of the imagestreams - // TODO(jkyros): This self-healing behavior does keep people from assigning arbitrary imagstreams (whether that's good or - // bad is up to us) - ctrl.eventRecorder.Event(pool, corev1.EventTypeNormal, "ImageStreamChange", "Image stream for pool "+pool.Name+" falling back to "+pbr.ImageStream.Content+ - " as others are unpopulated") - ctrlcommon.SetPoolImageStream(pool, pbr.ImageStream.Content) - } - return nil + return false } -func (ctrl *Controller) tagImageIntoStream(sourceImageStreamName, targetImageStreamName, imageName, tagName string) error { - - var internalRegistry = "image-registry.openshift-image-registry.svc:5000/" - fullTargetTagName := targetImageStreamName + ":" + tagName - // If you don't get the namespace prefix on there, it tries to pull it from docker.io and fails - fullSourceName := internalRegistry + ctrlcommon.MCONamespace + "/" + sourceImageStreamName + "@" + imageName - - var tag *imagev1.ImageStreamTag - tag, err := ctrl.imageclient.ImageV1().ImageStreamTags(ctrlcommon.MCONamespace).Get(context.TODO(), fullTargetTagName, metav1.GetOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - - it := &imagev1.ImageStreamTag{ - - ObjectMeta: metav1.ObjectMeta{ - Name: fullTargetTagName, - Namespace: ctrlcommon.MCONamespace, - }, - Tag: &imagev1.TagReference{ - Name: tagName, - From: &corev1.ObjectReference{ - Kind: "ImageStreamImage", - Namespace: ctrlcommon.MCONamespace, - Name: fullSourceName, - }, - ReferencePolicy: imagev1.TagReferencePolicy{ - Type: imagev1.SourceTagReferencePolicy, - }, - }, - } - glog.Infof("Tagging image %s from %s into imagestream %s", imageName+":"+tagName, sourceImageStreamName, targetImageStreamName) - _, err = ctrl.imageclient.ImageV1().ImageStreamTags(ctrlcommon.MCONamespace).Create(context.TODO(), it, metav1.CreateOptions{}) - return err - - } - return err +// Determines if we should do a build based upon the state of our +// MachineConfigPool, the presence of a build pod, etc. +func shouldWeDoABuild(builder interface { + IsBuildRunning(*mcfgv1.MachineConfigPool) (bool, error) +}, oldPool, curPool *mcfgv1.MachineConfigPool) (bool, error) { + // If we don't have a layered pool, we should not build. + poolStateSuggestsBuild := canPoolBuild(curPool) && + // If we have a config change or we're missing an image pullspec label, we + // should do a build. + (isPoolConfigChange(oldPool, curPool) || !hasImagePullspecAnnotation(curPool)) && + // If we're missing a build pod reference, it likely means we don't need to + // do a build. + !machineConfigPoolHasBuildRef(curPool) + if !poolStateSuggestsBuild { + return false, nil } - tag.Tag.From.Name = fullSourceName - glog.Infof("Updating image tag %s from %s into imagestream %s", imageName+":"+tagName, sourceImageStreamName, targetImageStreamName) - _, err = ctrl.imageclient.ImageV1().ImageStreamTags(ctrlcommon.MCONamespace).Update(context.TODO(), tag, metav1.UpdateOptions{}) - return err + // If a build is found running, we should not do a build. 
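+	// The builder's IsBuildRunning check is the final gate; any error from the
+	// lookup is returned to the caller unchanged.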
+ isRunning, err := builder.IsBuildRunning(curPool) + return !isRunning, err } -func (ctrl *Controller) cheatMachineConfigLabelIntoBuildConfig(imageStream *imagev1.ImageStream, pool *mcfgv1.MachineConfigPool) error { - // This is the mco content imagestream - latestImageTag := ctrl.getMostRecentImageTagForImageStream(imageStream, "latest") - if latestImageTag == nil { - return fmt.Errorf("No 'latest' image tag in imagestream %s: ", imageStream.Name) - - } - labels, err := ctrl.getLabelsForImageRef(latestImageTag.Image) - if err != nil { - return fmt.Errorf("Failed to retrieve labels for imagestream tag %s: %w", latestImageTag.DockerImageReference, err) +// Enumerates all of the build-related MachineConfigPool condition types. +func getMachineConfigPoolBuildConditions() []mcfgv1.MachineConfigPoolConditionType { + return []mcfgv1.MachineConfigPoolConditionType{ + mcfgv1.MachineConfigPoolBuildFailed, + mcfgv1.MachineConfigPoolBuildPending, + mcfgv1.MachineConfigPoolBuildSuccess, + mcfgv1.MachineConfigPoolBuilding, } +} - buildConfig, err := ctrl.bcLister.BuildConfigs(ctrlcommon.MCONamespace).Get(pool.Name + "-build" + ctrlcommon.ImageStreamSuffixMCOContent) - if err != nil { - return fmt.Errorf("Failed to retrieve corresponding buildconfig: %w", err) +// Determines if a pod or build is managed by this controller by examining its labels. +func hasAllRequiredOSBuildLabels(labels map[string]string) bool { + requiredLabels := []string{ + ctrlcommon.OSImageBuildPodLabel, + targetMachineConfigPoolLabel, + desiredConfigLabel, } - // Get buildconfig - _, err = ctrl.updateBuildConfigWithLabels(buildConfig, labels) - if err != nil { - return fmt.Errorf("Failed to update buildconfig %s with labels: %w", buildConfig.Name, err) + for _, label := range requiredLabels { + if _, ok := labels[label]; !ok { + return false + } } - return nil + return true } diff --git a/pkg/controller/build/build_controller_test.go b/pkg/controller/build/build_controller_test.go new file mode 100644 index 0000000000..58e1d2773b --- /dev/null +++ b/pkg/controller/build/build_controller_test.go @@ -0,0 +1,674 @@ +package build + +import ( + "context" + "fmt" + "os" + "time" + + ign3types "github.com/coreos/ignition/v2/config/v3_2/types" + buildv1 "github.com/openshift/api/build/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + fakeclientbuildv1 "github.com/openshift/client-go/build/clientset/versioned/fake" + mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" + fakeclientmachineconfigv1 "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/fake" + testhelpers "github.com/openshift/machine-config-operator/test/helpers" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + fakecorev1client "k8s.io/client-go/kubernetes/fake" + "k8s.io/klog/v2" + + corev1 "k8s.io/api/core/v1" + + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + + "testing" +) + +const ( + expectedImageSHA string = "sha256:628e4e8f0a78d91015c6cebeee95931ae2e8defe5dfb4ced4a82830e08937573" + expectedImagePullspecWithTag string = "registry.hostname.com/org/repo:latest" + expectedImagePullspecWithSHA string = "registry.hostname.com/org/repo@" + expectedImageSHA +) + +type optInFunc func(context.Context, *testing.T, *Clients, string) + +func TestMain(m *testing.M) { + klog.InitFlags(nil) + os.Exit(m.Run()) +} + +func TestBuildControllerNoPoolsOptedIn(t *testing.T) { + t.Parallel() + + 
fixture := newBuildControllerTestFixture(t) + fixture.runTestFuncs(t, testFuncs{ + imageBuilder: testNoMCPsOptedIn, + customPodBuilder: testNoMCPsOptedIn, + }) +} + +func TestBuildControllerSingleOptedInPool(t *testing.T) { + pool := "worker" + + t.Parallel() + + t.Run("Happy Path", func(t *testing.T) { + t.Parallel() + + newBuildControllerTestFixture(t).runTestFuncs(t, testFuncs{ + imageBuilder: func(ctx context.Context, t *testing.T, cs *Clients) { + testOptInMCPImageBuilder(ctx, t, cs, pool) + }, + customPodBuilder: func(ctx context.Context, t *testing.T, cs *Clients) { + testOptInMCPCustomBuildPod(ctx, t, cs, pool) + }, + }) + }) + + t.Run("Happy Path Multiple Configs", func(t *testing.T) { + t.Parallel() + + newBuildControllerTestFixture(t).runTestFuncs(t, testFuncs{ + imageBuilder: func(ctx context.Context, t *testing.T, cs *Clients) { + testOptInMCPImageBuilder(ctx, t, cs, pool) + }, + customPodBuilder: func(ctx context.Context, t *testing.T, cs *Clients) { + testOptInMCPCustomBuildPod(ctx, t, cs, pool) + }, + }) + }) + + t.Run("Build Failure", func(t *testing.T) { + t.Parallel() + + newBuildControllerTestFixture(t).runTestFuncs(t, testFuncs{ + imageBuilder: func(ctx context.Context, t *testing.T, cs *Clients) { + mcp := optInMCP(ctx, t, cs, pool) + assertMCPFollowsImageBuildStatus(ctx, t, cs, mcp, buildv1.BuildPhaseFailed) + assertMachineConfigPoolReachesState(ctx, t, cs, pool, isMCPBuildFailure) + }, + customPodBuilder: func(ctx context.Context, t *testing.T, cs *Clients) { + mcp := optInMCP(ctx, t, cs, pool) + assertMCPFollowsBuildPodStatus(ctx, t, cs, mcp, corev1.PodFailed) + assertMachineConfigPoolReachesState(ctx, t, cs, pool, isMCPBuildFailure) + }, + }) + }) + + t.Run("Degraded Pool", func(t *testing.T) { + t.Parallel() + + newBuildControllerTestFixture(t).runTestFuncs(t, testFuncs{ + imageBuilder: testMCPIsDegraded, + customPodBuilder: testMCPIsDegraded, + }) + }) + + t.Run("Opted-in pool opts out", func(t *testing.T) { + t.Parallel() + + newBuildControllerTestFixture(t).runTestFuncs(t, testFuncs{ + imageBuilder: func(ctx context.Context, t *testing.T, cs *Clients) { + testOptedInMCPOptsOut(ctx, t, cs, testOptInMCPImageBuilder) + }, + customPodBuilder: func(ctx context.Context, t *testing.T, cs *Clients) { + testOptedInMCPOptsOut(ctx, t, cs, testOptInMCPCustomBuildPod) + }, + }) + }) + + t.Run("Built pool gets unrelated update", func(t *testing.T) { + t.Parallel() + + newBuildControllerTestFixture(t).runTestFuncs(t, testFuncs{ + imageBuilder: func(ctx context.Context, t *testing.T, cs *Clients) { + testOptedInMCPOptsOut(ctx, t, cs, testOptInMCPImageBuilder) + }, + customPodBuilder: func(ctx context.Context, t *testing.T, cs *Clients) { + testOptedInMCPOptsOut(ctx, t, cs, testOptInMCPCustomBuildPod) + }, + }) + }) +} + +func TestBuildControllerMultipleOptedInPools(t *testing.T) { + t.Parallel() + + pools := []string{"master", "worker"} + + // Tests that a single config is rolled out to the target MachineConfigPools. 
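+	// Both pools share one fixture and run as parallel subtests, so the
+	// controller has to keep each pool's build separate.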
+ t.Run("Happy Path", func(t *testing.T) { + t.Parallel() + + fixture := newBuildControllerTestFixture(t) + for _, pool := range pools { + pool := pool + t.Run(pool, func(t *testing.T) { + t.Parallel() + fixture.runTestFuncs(t, testFuncs{ + imageBuilder: func(ctx context.Context, t *testing.T, cs *Clients) { + t.Logf("Running in pool %s", pool) + testOptInMCPImageBuilder(ctx, t, cs, pool) + }, + customPodBuilder: func(ctx context.Context, t *testing.T, cs *Clients) { + t.Logf("Running in pool %s", pool) + testOptInMCPCustomBuildPod(ctx, t, cs, pool) + }, + }) + }) + } + }) + + // Tests that multiple configs are serially rolled out to the target + // MachineConfigPool and ensures that each config is rolled out before moving + // onto the next one. + t.Run("Happy Path Multiple Configs", func(t *testing.T) { + t.Parallel() + + fixture := newBuildControllerTestFixture(t) + + for _, pool := range pools { + pool := pool + t.Run(pool, func(t *testing.T) { + t.Parallel() + + fixture.runTestFuncs(t, testFuncs{ + imageBuilder: func(ctx context.Context, t *testing.T, cs *Clients) { + testMultipleConfigsAreRolledOut(ctx, t, cs, pool, testOptInMCPImageBuilder) + }, + customPodBuilder: func(ctx context.Context, t *testing.T, cs *Clients) { + testMultipleConfigsAreRolledOut(ctx, t, cs, pool, testOptInMCPCustomBuildPod) + }, + }) + }) + } + }) + + // Tests that a build failure degrades the MachineConfigPool + t.Run("Build Failure", func(t *testing.T) { + t.Parallel() + + fixture := newBuildControllerTestFixture(t) + + for _, pool := range pools { + pool := pool + t.Run(pool, func(t *testing.T) { + t.Parallel() + + fixture.runTestFuncs(t, testFuncs{ + imageBuilder: func(ctx context.Context, t *testing.T, cs *Clients) { + mcp := optInMCP(ctx, t, cs, pool) + assertMCPFollowsImageBuildStatus(ctx, t, cs, mcp, buildv1.BuildPhaseFailed) + assertMachineConfigPoolReachesState(ctx, t, cs, pool, isMCPBuildFailure) + }, + customPodBuilder: func(ctx context.Context, t *testing.T, cs *Clients) { + mcp := optInMCP(ctx, t, cs, pool) + assertMCPFollowsBuildPodStatus(ctx, t, cs, mcp, corev1.PodFailed) + assertMachineConfigPoolReachesState(ctx, t, cs, pool, isMCPBuildFailure) + }, + }) + }) + } + }) +} + +// Holds a name and function to implement a given BuildController test. 
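+// In practice the fixture holds the shared test context plus one set of fake
+// clients per builder backend (OpenShift Builds and custom build pods), so the
+// same scenario can be exercised against both implementations.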
+type buildControllerTestFixture struct { + ctx context.Context + t *testing.T + imageBuilderClient *Clients + customPodBuilderClient *Clients +} + +type testFuncs struct { + imageBuilder func(context.Context, *testing.T, *Clients) + customPodBuilder func(context.Context, *testing.T, *Clients) +} + +func newBuildControllerTestFixtureWithContext(ctx context.Context, t *testing.T) *buildControllerTestFixture { + b := &buildControllerTestFixture{ + ctx: ctx, + t: t, + } + + b.imageBuilderClient = b.startBuildControllerWithImageBuilder() + b.customPodBuilderClient = b.startBuildControllerWithCustomPodBuilder() + + return b +} + +func newBuildControllerTestFixture(t *testing.T) *buildControllerTestFixture { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + t.Cleanup(cancel) + + return newBuildControllerTestFixtureWithContext(ctx, t) +} + +func (b *buildControllerTestFixture) runTestFuncs(t *testing.T, tf testFuncs) { + t.Run("CustomBuildPod", func(t *testing.T) { + t.Parallel() + // t.Cleanup(func() { + // dumpObjects(b.ctx, t, b.customPodBuilderClient, t.Name()) + // }) + tf.customPodBuilder(b.ctx, t, b.customPodBuilderClient) + }) + + t.Run("ImageBuilder", func(t *testing.T) { + t.Parallel() + // t.Cleanup(func() { + // dumpObjects(b.ctx, t, b.imageBuilderClient, t.Name()) + // }) + + tf.imageBuilder(b.ctx, t, b.imageBuilderClient) + }) +} + +func (b *buildControllerTestFixture) setupClients() *Clients { + objects := newMachineConfigPoolAndConfigs("master", "rendered-master-1") + objects = append(objects, newMachineConfigPoolAndConfigs("worker", "rendered-worker-1")...) + objects = append(objects, &mcfgv1.ControllerConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-config-controller", + }, + }) + + onClusterBuildConfigMap := getOnClusterBuildConfigMap() + + legacyPullSecret := `{"registry.hostname.com": {"username": "user", "password": "s3kr1t", "auth": "s00pers3kr1t", "email": "user@hostname.com"}}` + + pullSecret := `{"auths":{"registry.hostname.com": {"username": "user", "password": "s3kr1t", "auth": "s00pers3kr1t", "email": "user@hostname.com"}}}` + + return &Clients{ + mcfgclient: fakeclientmachineconfigv1.NewSimpleClientset(objects...), + kubeclient: fakecorev1client.NewSimpleClientset( + getOSImageURLConfigMap(), + onClusterBuildConfigMap, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: onClusterBuildConfigMap.Data["finalImagePushSecretName"], + Namespace: ctrlcommon.MCONamespace, + }, + Data: map[string][]byte{ + corev1.DockerConfigKey: []byte(legacyPullSecret), + }, + Type: corev1.SecretTypeDockercfg, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: onClusterBuildConfigMap.Data["baseImagePullSecretName"], + Namespace: ctrlcommon.MCONamespace, + }, + Data: map[string][]byte{ + corev1.DockerConfigJsonKey: []byte(pullSecret), + }, + Type: corev1.SecretTypeDockerConfigJson, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "etc-pki-entitlement", + Namespace: "openshift-config-managed", + }, + Data: map[string][]byte{ + "entitlement-key.pem": []byte("abc"), + "entitlement.pem": []byte("123"), + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-config-operator", + Namespace: ctrlcommon.MCONamespace, + }, + }, + ), + buildclient: fakeclientbuildv1.NewSimpleClientset(), + } +} + +func (b *buildControllerTestFixture) getConfig() BuildControllerConfig { + return BuildControllerConfig{ + MaxRetries: 5, + UpdateDelay: time.Millisecond, + } +} + +// Instantiates all of the initial objects and starts 
the BuildController. +func (b *buildControllerTestFixture) startBuildControllerWithImageBuilder() *Clients { + clients := b.setupClients() + + ctrl := NewWithImageBuilder(b.getConfig(), clients) + + go ctrl.Run(b.ctx, 5) + + return clients +} + +func (b *buildControllerTestFixture) startBuildControllerWithCustomPodBuilder() *Clients { + clients := b.setupClients() + + ctrl := NewWithCustomPodBuilder(b.getConfig(), clients) + + go ctrl.Run(b.ctx, 5) + + return clients +} + +// Helper that determines if the build is a success. +func isMCPBuildSuccess(mcp *mcfgv1.MachineConfigPool) bool { + imagePullspec, hasConfigAnnotation := mcp.Annotations[ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey] + + return hasConfigAnnotation && + ctrlcommon.IsLayeredPool(mcp) && + // Unfortunately, FakeClient has a "fake logs" value hardcoded within it. + // With that in mind, we cannot inject realistic Skopeo logs, so we instead + // inject "fake@logs" and skip the JSON parsing portion of the test. See: + // https://github.com/kubernetes/kubernetes/pull/91485. Presumably, once + // https://github.com/kubernetes/kubernetes/issues/117144 is addressed and + // lands, we can stop doing this. + (imagePullspec == expectedImagePullspecWithSHA || imagePullspec == "fake@logs") && + mcfgv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcfgv1.MachineConfigPoolBuildSuccess) && + !machineConfigPoolHasBuildRef(mcp) && machineConfigPoolHasMachineConfigRefs(mcp) +} + +func machineConfigPoolHasMachineConfigRefs(pool *mcfgv1.MachineConfigPool) bool { + expectedMCP := newMachineConfigPool(pool.Name) + + for _, ref := range expectedMCP.Spec.Configuration.Source { + if !machineConfigPoolHasObjectRef(pool, ref) { + return false + } + } + + return true +} + +// Helper that determines if the build was a failure. +func isMCPBuildFailure(mcp *mcfgv1.MachineConfigPool) bool { + return ctrlcommon.IsLayeredPool(mcp) && + mcfgv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcfgv1.MachineConfigPoolBuildFailed) && + mcfgv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcfgv1.MachineConfigPoolDegraded) && + machineConfigPoolHasBuildRef(mcp) && machineConfigPoolHasMachineConfigRefs(mcp) +} + +// Opts a given MachineConfigPool into layering and asserts that the MachineConfigPool reaches the desired state. +func testOptInMCPCustomBuildPod(ctx context.Context, t *testing.T, cs *Clients, poolName string) { + mcp := optInMCP(ctx, t, cs, poolName) + assertMCPFollowsBuildPodStatus(ctx, t, cs, mcp, corev1.PodSucceeded) + assertMachineConfigPoolReachesState(ctx, t, cs, poolName, isMCPBuildSuccess) +} + +// Opts a given MachineConfigPool into layering and asserts that the MachineConfigPool reaches the desired state. +func testOptInMCPImageBuilder(ctx context.Context, t *testing.T, cs *Clients, poolName string) { + mcp := optInMCP(ctx, t, cs, poolName) + assertMCPFollowsImageBuildStatus(ctx, t, cs, mcp, buildv1.BuildPhaseComplete) + assertMachineConfigPoolReachesState(ctx, t, cs, poolName, isMCPBuildSuccess) +} + +// Mutates all MachineConfigPools that are not opted in to ensure they are ignored. +func testNoMCPsOptedIn(ctx context.Context, t *testing.T, cs *Clients) { + // Set an unrelated label to force a sync. 
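+	// The label key used below is arbitrary; it only needs to trigger an
+	// update event without opting the pool into layering.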
+ mcpList, err := cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + + for _, mcp := range mcpList.Items { + mcp := mcp + mcp.Labels["a-label-key"] = "" + _, err := cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().Update(ctx, &mcp, metav1.UpdateOptions{}) + require.NoError(t, err) + } + + mcpList, err = cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + + for _, mcp := range mcpList.Items { + mcp := mcp + assert.False(t, ctrlcommon.IsLayeredPool(&mcp)) + assert.NotContains(t, mcp.Labels, ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey) + } +} + +// Rolls out multiple configs to a given pool, asserting that each config is completely rolled out before moving onto the next. +func testMultipleConfigsAreRolledOut(ctx context.Context, t *testing.T, cs *Clients, poolName string, optInFunc optInFunc) { + for i := 1; i < 10; i++ { + config := fmt.Sprintf("rendered-%s-%d", poolName, i) + + t.Run(config, func(t *testing.T) { + workerMCP, err := cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(ctx, poolName, metav1.GetOptions{}) + require.NoError(t, err) + + workerMCP.Spec.Configuration.Name = config + + renderedMC := testhelpers.NewMachineConfig( + config, + map[string]string{ + ctrlcommon.GeneratedByControllerVersionAnnotationKey: "version-number", + "machineconfiguration.openshift.io/role": poolName, + }, + "", + []ign3types.File{}) + + _, err = cs.mcfgclient.MachineconfigurationV1().MachineConfigs().Create(ctx, renderedMC, metav1.CreateOptions{}) + if err != nil && !k8serrors.IsAlreadyExists(err) { + require.NoError(t, err) + } + + _, err = cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().Update(ctx, workerMCP, metav1.UpdateOptions{}) + require.NoError(t, err) + optInFunc(ctx, t, cs, poolName) + + var targetPool *mcfgv1.MachineConfigPool + + outcome := assertMachineConfigPoolReachesState(ctx, t, cs, poolName, func(mcp *mcfgv1.MachineConfigPool) bool { + targetPool = mcp + return mcp.Spec.Configuration.Name == config && isMCPBuildSuccess(mcp) && machineConfigPoolHasMachineConfigRefs(mcp) + }) + + if !outcome { + t.Logf("Config name, actual: %s, expected: %v", targetPool.Spec.Configuration.Name, config) + t.Logf("Is build success? %v", isMCPBuildSuccess(targetPool)) + t.Logf("Has all MachineConfig refs? %v", machineConfigPoolHasMachineConfigRefs(targetPool)) + } + + time.Sleep(time.Millisecond) + }) + } +} + +// Tests that an opted-in MachineConfigPool is able to opt back out. +func testOptedInMCPOptsOut(ctx context.Context, t *testing.T, cs *Clients, optInFunc optInFunc) { + optInFunc(ctx, t, cs, "worker") + + optOutMCP(ctx, t, cs, "worker") + + assertMachineConfigPoolReachesState(ctx, t, cs, "worker", func(mcp *mcfgv1.MachineConfigPool) bool { + layeringLabels := []string{ + ctrlcommon.LayeringEnabledPoolLabel, + } + + for _, label := range layeringLabels { + if _, ok := mcp.Labels[label]; ok { + return false + } + } + + for _, condition := range getMachineConfigPoolBuildConditions() { + if mcfgv1.IsMachineConfigPoolConditionPresentAndEqual(mcp.Status.Conditions, condition, corev1.ConditionTrue) || + mcfgv1.IsMachineConfigPoolConditionPresentAndEqual(mcp.Status.Conditions, condition, corev1.ConditionFalse) { + return false + } + } + + return !machineConfigPoolHasBuildRef(mcp) + }) +} + +// Tests that if a MachineConfigPool is degraded, that a build (object / pod) is not created. 
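+// The pool is opted into layering, marked Degraded, and then polled to confirm
+// that no build conditions are set and that no Builds or build pods ever
+// appear.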
+func testMCPIsDegraded(ctx context.Context, t *testing.T, cs *Clients) { + mcp, err := cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(ctx, "worker", metav1.GetOptions{}) + require.NoError(t, err) + + mcp.Labels[ctrlcommon.LayeringEnabledPoolLabel] = "" + + condition := mcfgv1.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolDegraded, corev1.ConditionTrue, "", "") + mcfgv1.SetMachineConfigPoolCondition(&mcp.Status, *condition) + + _, err = cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().Update(ctx, mcp, metav1.UpdateOptions{}) + require.NoError(t, err) + + assertMachineConfigPoolReachesState(ctx, t, cs, "worker", func(mcp *mcfgv1.MachineConfigPool) bool { + // TODO: Should we fail the build without even starting it if the pool is degraded? + for _, condition := range getMachineConfigPoolBuildConditions() { + if mcfgv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, condition) { + return false + } + } + + return mcfgv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcfgv1.MachineConfigPoolDegraded) && + assertNoBuildPods(ctx, t, cs) && + assertNoBuilds(ctx, t, cs) + }) +} + +// Tests that a label update or similar does not cause a build to occur. +func testBuiltPoolGetsUnrelatedUpdate(ctx context.Context, t *testing.T, cs *Clients, optInFunc optInFunc) { + optInFunc(ctx, t, cs, "worker") + + pool, err := cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(ctx, "worker", metav1.GetOptions{}) + require.NoError(t, err) + + pool.Annotations["unrelated-annotation"] = "hello" + pool.Labels["unrelated-label"] = "" + _, err = cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().Update(ctx, pool, metav1.UpdateOptions{}) + require.NoError(t, err) + + assertMachineConfigPoolReachesState(ctx, t, cs, "worker", func(mcp *mcfgv1.MachineConfigPool) bool { + return assert.Equal(t, mcp.Status.Conditions, pool.Status.Conditions) && + assertNoBuildPods(ctx, t, cs) && + assertNoBuilds(ctx, t, cs) + }) +} + +// Mocks whether a given build is running. +type mockIsBuildRunning bool + +func (m *mockIsBuildRunning) IsBuildRunning(*mcfgv1.MachineConfigPool) (bool, error) { + return bool(*m), nil +} + +// Tests if we should do a build for a variety of edge-cases and circumstances. +func TestShouldWeDoABuild(t *testing.T) { + t.Parallel() + + // Mutators which mutate the given MachineConfigPool. 
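+	// Each mutator builds on the previous one: a layered pool, a layered pool
+	// with an image pullspec annotation, and a layered pool with build
+	// conditions applied on top of that.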
+ toLayeredPool := func(mcp *mcfgv1.MachineConfigPool) *mcfgv1.MachineConfigPool { + mcp.Labels[ctrlcommon.LayeringEnabledPoolLabel] = "" + return mcp + } + + toLayeredPoolWithImagePullspec := func(mcp *mcfgv1.MachineConfigPool) *mcfgv1.MachineConfigPool { + mcp = toLayeredPool(mcp) + mcp.Annotations = map[string]string{ + ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey: "image-pullspec", + } + return mcp + } + + toLayeredPoolWithConditionsSet := func(mcp *mcfgv1.MachineConfigPool, conditions []mcfgv1.MachineConfigPoolCondition) *mcfgv1.MachineConfigPool { + mcp = toLayeredPoolWithImagePullspec(mcp) + setMCPBuildConditions(mcp, conditions) + return mcp + } + + type shouldWeBuildTestCase struct { + name string + oldPool *mcfgv1.MachineConfigPool + curPool *mcfgv1.MachineConfigPool + buildRunning bool + expected bool + } + + testCases := []shouldWeBuildTestCase{ + { + name: "Non-layered pool", + oldPool: newMachineConfigPool("worker", "rendered-worker-1"), + curPool: newMachineConfigPool("worker", "rendered-worker-1"), + expected: false, + }, + { + name: "Layered pool config change with missing image pullspec", + oldPool: toLayeredPool(newMachineConfigPool("worker", "rendered-worker-1")), + curPool: toLayeredPool(newMachineConfigPool("worker", "rendered-worker-2")), + expected: true, + }, + { + name: "Layered pool with no config change and missing image pullspec", + oldPool: toLayeredPool(newMachineConfigPool("worker", "rendered-worker-1")), + curPool: toLayeredPool(newMachineConfigPool("worker", "rendered-worker-1")), + expected: true, + }, + { + name: "Layered pool with image pullspec", + oldPool: toLayeredPoolWithImagePullspec(newMachineConfigPool("worker", "rendered-worker-1")), + curPool: toLayeredPoolWithImagePullspec(newMachineConfigPool("worker", "rendered-worker-1")), + }, + { + name: "Layered pool with build pod", + oldPool: toLayeredPoolWithImagePullspec(newMachineConfigPool("worker", "rendered-worker-1")), + curPool: toLayeredPoolWithImagePullspec(newMachineConfigPool("worker", "rendered-worker-1")), + buildRunning: true, + expected: false, + }, + { + name: "Layered pool with prior successful build and config change", + oldPool: toLayeredPoolWithConditionsSet(newMachineConfigPool("worker", "rendered-worker-1"), []mcfgv1.MachineConfigPoolCondition{ + { + Type: mcfgv1.MachineConfigPoolBuildSuccess, + Status: corev1.ConditionTrue, + }, + }), + curPool: toLayeredPoolWithImagePullspec(newMachineConfigPool("worker", "rendered-worker-2")), + expected: true, + }, + } + + // Generate additional test cases programmatically. 
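+	// For every condition that should block a rebuild (build failed, pending,
+	// or in progress, plus the degraded variants), both the old and current
+	// pools carry the condition and shouldWeDoABuild is expected to return
+	// false.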
+ buildStates := map[mcfgv1.MachineConfigPoolConditionType]string{ + mcfgv1.MachineConfigPoolBuildFailed: "failed", + mcfgv1.MachineConfigPoolBuildPending: "pending", + mcfgv1.MachineConfigPoolBuilding: "in progress", + mcfgv1.MachineConfigPoolDegraded: "degraded", + mcfgv1.MachineConfigPoolNodeDegraded: "node degraded", + mcfgv1.MachineConfigPoolRenderDegraded: "render degraded", + } + + for conditionType, name := range buildStates { + conditions := []mcfgv1.MachineConfigPoolCondition{ + { + Type: conditionType, + Status: corev1.ConditionTrue, + }, + } + + testCases = append(testCases, shouldWeBuildTestCase{ + name: fmt.Sprintf("Layered pool with %s build", name), + oldPool: toLayeredPoolWithConditionsSet(newMachineConfigPool("worker", "rendered-worker-1"), conditions), + curPool: toLayeredPoolWithConditionsSet(newMachineConfigPool("worker", "rendered-worker-1"), conditions), + expected: false, + }) + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + mb := mockIsBuildRunning(testCase.buildRunning) + + doABuild, err := shouldWeDoABuild(&mb, testCase.oldPool, testCase.curPool) + assert.NoError(t, err) + assert.Equal(t, testCase.expected, doABuild) + }) + } +} diff --git a/pkg/controller/build/fixtures_test.go b/pkg/controller/build/fixtures_test.go new file mode 100644 index 0000000000..852977694f --- /dev/null +++ b/pkg/controller/build/fixtures_test.go @@ -0,0 +1,566 @@ +package build + +import ( + "context" + "fmt" + "io/ioutil" + "strings" + "testing" + "time" + + ign3types "github.com/coreos/ignition/v2/config/v3_2/types" + "github.com/davecgh/go-spew/spew" + "github.com/ghodss/yaml" + buildv1 "github.com/openshift/api/build/v1" + mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + testhelpers "github.com/openshift/machine-config-operator/test/helpers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" +) + +// Gets an example machine-config-osimageurl ConfigMap. +func getOSImageURLConfigMap() *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: machineConfigOSImageURLConfigMapName, + Namespace: ctrlcommon.MCONamespace, + }, + Data: map[string]string{ + baseOSContainerImageConfigKey: "registry.ci.openshift.org/ocp/4.14-2023-05-29-125629@sha256:12e89d631c0ca1700262583acfb856b6e7dbe94800cb38035d68ee5cc912411c", + baseOSExtensionsContainerImageConfigKey: "registry.ci.openshift.org/ocp/4.14-2023-05-29-125629@sha256:5b6d901069e640fc53d2e971fa1f4802bf9dea1a4ffba67b8a17eaa7d8dfa336", + osImageURLConfigKey: "registry.ci.openshift.org/ocp/4.14-2023-05-29-125629@sha256:4f7792412d1559bf0a996edeff5e836e210f6d77df94b552a3866144d043bce1", + releaseVersionConfigKey: "4.14.0-0.ci-2023-05-29-125629", + }, + } +} + +// Gets an example on-cluster-build-config ConfigMap. 
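+// It names the base-image pull secret and the final-image push secret, and
+// carries the pullspec used for the final image.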
+func getOnClusterBuildConfigMap() *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: onClusterBuildConfigMapName, + Namespace: ctrlcommon.MCONamespace, + }, + Data: map[string]string{ + baseImagePullSecretNameConfigKey: "base-image-pull-secret", + finalImagePushSecretNameConfigKey: "final-image-push-secret", + finalImagePullspecConfigKey: expectedImagePullspecWithTag, + }, + } +} + +// Creates a new MachineConfigPool and the corresponding MachineConfigs. +func newMachineConfigPoolAndConfigs(name string, params ...string) []runtime.Object { + mcp := newMachineConfigPool(name, params...) + + out := []runtime.Object{mcp} + + files := []ign3types.File{} + + // Create individual MachineConfigs to accompany the child MachineConfigs referred to by our MachineConfigPool. + for _, childConfig := range mcp.Spec.Configuration.Source { + if childConfig.Kind != "MachineConfig" { + continue + } + + filename := fmt.Sprintf("/etc/%s", childConfig.Name) + file := ctrlcommon.NewIgnFile(filename, childConfig.Name) + files = append(files, file) + + out = append(out, testhelpers.NewMachineConfig( + childConfig.Name, + map[string]string{ + "machineconfiguration.openshift.io/role": name, + }, + "", + []ign3types.File{file})) + } + + // Create a rendered MachineConfig to accompany our MachineConfigPool. + out = append(out, testhelpers.NewMachineConfig( + mcp.Spec.Configuration.Name, + map[string]string{ + ctrlcommon.GeneratedByControllerVersionAnnotationKey: "version-number", + "machineconfiguration.openshift.io/role": name, + }, + "", + files)) + + return out +} + +// Creates a simple MachineConfigPool object for testing. Requires a name for +// the MachineConfigPool, optionally accepts a name for the rendered config. +func newMachineConfigPool(name string, params ...string) *mcfgv1.MachineConfigPool { + renderedConfigName := "" + if len(params) >= 1 { + renderedConfigName = params[0] + } else { + renderedConfigName = fmt.Sprintf("rendered-%s-1", name) + } + + childConfigs := []corev1.ObjectReference{} + for i := 1; i <= 5; i++ { + childConfigs = append(childConfigs, corev1.ObjectReference{ + Name: fmt.Sprintf("%s-config-%d", name, i), + Kind: "MachineConfig", + }) + } + + nodeRoleLabel := fmt.Sprintf("node-role.kubernetes.io/%s", name) + nodeSelector := metav1.AddLabelToSelector(&metav1.LabelSelector{}, nodeRoleLabel, "") + + poolSelector := metav1.AddLabelToSelector(&metav1.LabelSelector{}, mcfgv1.MachineConfigRoleLabelKey, name) + + mcp := testhelpers.NewMachineConfigPool(name, poolSelector, nodeSelector, renderedConfigName) + mcp.Spec.Configuration.Source = append(mcp.Spec.Configuration.Source, childConfigs...) + mcp.Status.Configuration.Source = append(mcp.Status.Configuration.Source, childConfigs...) + + return mcp +} + +// Opts a MachineConfigPool into layering. +func optInMCP(ctx context.Context, t *testing.T, cs *Clients, poolName string) *mcfgv1.MachineConfigPool { + t.Helper() + + mcp, err := cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(ctx, poolName, metav1.GetOptions{}) + require.NoError(t, err) + + mcp.Labels[ctrlcommon.LayeringEnabledPoolLabel] = "" + + mcp, err = cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().Update(ctx, mcp, metav1.UpdateOptions{}) + require.NoError(t, err) + + return mcp +} + +// Opts a MachineConfigPool out of layering. 
+func optOutMCP(ctx context.Context, t *testing.T, cs *Clients, poolName string) { + t.Helper() + + mcp, err := cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(ctx, poolName, metav1.GetOptions{}) + require.NoError(t, err) + + delete(mcp.Labels, ctrlcommon.LayeringEnabledPoolLabel) + + _, err = cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().Update(ctx, mcp, metav1.UpdateOptions{}) + require.NoError(t, err) +} + +// Polls until a MachineConfigPool reaches a desired state. +func assertMachineConfigPoolReachesState(ctx context.Context, t *testing.T, cs *Clients, poolName string, checkFunc func(*mcfgv1.MachineConfigPool) bool) bool { + t.Helper() + + pollCtx, cancel := context.WithTimeout(ctx, time.Second*10) + t.Cleanup(cancel) + + err := wait.PollImmediateUntilWithContext(pollCtx, time.Millisecond, func(c context.Context) (bool, error) { + mcp, err := cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(c, poolName, metav1.GetOptions{}) + if err != nil { + return false, err + } + + return checkFunc(mcp), nil + }) + + return assert.NoError(t, err, "MachineConfigPool %s never reached desired state", poolName) +} + +// Asserts that there are no build pods. +func assertNoBuildPods(ctx context.Context, t *testing.T, cs *Clients) bool { + t.Helper() + + foundBuildPods := false + + buildPodNames := []string{} + + podList, err := cs.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + + for _, pod := range podList.Items { + pod := pod + if hasAllRequiredOSBuildLabels(pod.Labels) { + foundBuildPods = true + buildPodNames = append(buildPodNames, pod.Name) + } + } + + return assert.False(t, foundBuildPods, "expected not to find build pods, found: %v", buildPodNames) +} + +// Asserts that there are no builds. +func assertNoBuilds(ctx context.Context, t *testing.T, cs *Clients) bool { + t.Helper() + + foundBuilds := false + + buildNames := []string{} + + buildList, err := cs.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + + for _, build := range buildList.Items { + build := build + if hasAllRequiredOSBuildLabels(build.Labels) { + foundBuilds = true + buildNames = append(buildNames, build.Name) + } + } + + return assert.False(t, foundBuilds, "expected not to find builds, found: %v", buildNames) +} + +// Asserts that ConfigMaps were created. +func assertConfigMapsCreated(ctx context.Context, t *testing.T, cs *Clients, ibr ImageBuildRequest) bool { + t.Helper() + + isFound := func(name string, configmapList *corev1.ConfigMapList) bool { + for _, item := range configmapList.Items { + if item.Name == name && hasAllRequiredOSBuildLabels(item.Labels) { + return true + } + } + + return false + } + + expectedConfigmaps := map[string]bool{ + ibr.getDockerfileConfigMapName(): false, + ibr.getMCConfigMapName(): false, + } + + err := wait.PollImmediateInfiniteWithContext(ctx, time.Millisecond, func(ctx context.Context) (bool, error) { + configmapList, err := cs.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{}) + if err != nil { + return false, err + } + + for expected := range expectedConfigmaps { + if isFound(expected, configmapList) { + expectedConfigmaps[expected] = true + } else { + return false, nil + } + } + + return true, nil + }) + + return assert.NoError(t, err, "configmap(s) was not created %v", expectedConfigmaps) +} + +// Polls until a build is created. 
+func assertBuildIsCreated(ctx context.Context, t *testing.T, cs *Clients, ibr ImageBuildRequest) bool { + t.Helper() + + buildName := ibr.getBuildName() + + err := wait.PollImmediateInfiniteWithContext(ctx, time.Millisecond, func(ctx context.Context) (bool, error) { + buildList, err := cs.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{}) + if err != nil { + return false, err + } + + for _, build := range buildList.Items { + if build.Name == buildName { + return true, nil + } + } + + return false, nil + }) + + return assert.NoError(t, err, "build %s was not created", buildName) +} + +// Polls until a build pod is created. +func assertBuildPodIsCreated(ctx context.Context, t *testing.T, cs *Clients, ibr ImageBuildRequest) bool { + t.Helper() + + buildPodName := ibr.getBuildName() + + err := wait.PollImmediateInfiniteWithContext(ctx, time.Millisecond, func(ctx context.Context) (bool, error) { + podList, err := cs.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{}) + if err != nil { + return false, err + } + + for _, pod := range podList.Items { + if pod.Name == buildPodName { + return true, nil + } + } + + return false, nil + }) + + return assert.NoError(t, err, "build pod %s was not created", buildPodName) +} + +// Simulates a pod being scheduled and reaching various states. Verifies that +// the target MachineConfigPool reaches the expected states as it goes. +func assertMCPFollowsImageBuildStatus(ctx context.Context, t *testing.T, cs *Clients, mcp *mcfgv1.MachineConfigPool, endingPhase buildv1.BuildPhase) bool { //nolint:unparam // This param is actually used. + t.Helper() + + var outcome bool + + defer func() { + assert.True(t, outcome) + }() + + // Each of the various pod phases we're interested in. + buildPhases := []buildv1.BuildPhase{ + buildv1.BuildPhaseNew, + buildv1.BuildPhasePending, + buildv1.BuildPhaseRunning, + endingPhase, + } + + // Each pod phase is correllated to a MachineConfigPoolConditionType. + buildPhaseToMCPCondition := map[buildv1.BuildPhase]mcfgv1.MachineConfigPoolConditionType{ + buildv1.BuildPhaseNew: mcfgv1.MachineConfigPoolBuildPending, + buildv1.BuildPhasePending: mcfgv1.MachineConfigPoolBuildPending, + buildv1.BuildPhaseRunning: mcfgv1.MachineConfigPoolBuilding, + buildv1.BuildPhaseComplete: mcfgv1.MachineConfigPoolBuildSuccess, + buildv1.BuildPhaseError: mcfgv1.MachineConfigPoolBuildFailed, + buildv1.BuildPhaseFailed: mcfgv1.MachineConfigPoolBuildFailed, + buildv1.BuildPhaseCancelled: mcfgv1.MachineConfigPoolBuildFailed, + } + + // Determine if the MachineConfigPool should have a reference to the build pod. + shouldHaveBuildRef := map[buildv1.BuildPhase]bool{ + buildv1.BuildPhaseNew: true, + buildv1.BuildPhasePending: true, + buildv1.BuildPhaseRunning: true, + buildv1.BuildPhaseComplete: false, + buildv1.BuildPhaseError: true, + buildv1.BuildPhaseFailed: true, + buildv1.BuildPhaseCancelled: true, + } + + ibr := newImageBuildRequest(mcp) + + buildName := ibr.getBuildName() + + // Wait for the build pod to be created. + outcome = assertBuildIsCreated(ctx, t, cs, ibr) + if !outcome { + return false + } + + outcome = assertConfigMapsCreated(ctx, t, cs, ibr) + if !outcome { + return false + } + + // Cycle through each of the build pod phases. + for _, phase := range buildPhases { + // Get the build pod by name. + build, err := cs.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Get(ctx, buildName, metav1.GetOptions{}) + require.NoError(t, err) + + // Set the pod phase and update it. 
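+		// (Despite the wording above, this variant drives a Build object's
+		// phase; the pod-based variant further down does the same with pod
+		// phases.)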
+ build.Status.Phase = phase + + // If we're successful, the build object should have an image pullspec attached to it. + // TODO: Need to figure out how / where to set this on the custom pod builder. + if phase == buildv1.BuildPhaseComplete { + build.Status.OutputDockerImageReference = expectedImagePullspecWithTag + build.Status.Output.To = &buildv1.BuildStatusOutputTo{ + ImageDigest: expectedImageSHA, + } + } + + _, err = cs.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Update(ctx, build, metav1.UpdateOptions{}) + require.NoError(t, err) + + // Look up the expected MCP condition for our current pod phase. + expectedMCPCondition := buildPhaseToMCPCondition[phase] + + // Look up the expected build pod condition for our current pod phase. + expectedBuildRefPresence := shouldHaveBuildRef[phase] + + var targetPool *mcfgv1.MachineConfigPool + + // Wait for the MCP condition to reach the expected state. + outcome = assertMachineConfigPoolReachesState(ctx, t, cs, mcp.Name, func(mcp *mcfgv1.MachineConfigPool) bool { + targetPool = mcp + return mcfgv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, expectedMCPCondition) && + expectedBuildRefPresence == machineConfigPoolHasBuildRef(mcp) && + machineConfigPoolHasMachineConfigRefs(mcp) + }) + + if !outcome { + spew.Dump(targetPool) + t.Logf("Has expected condition (%s) for phase (%s)? %v", expectedMCPCondition, phase, mcfgv1.IsMachineConfigPoolConditionTrue(targetPool.Status.Conditions, expectedMCPCondition)) + t.Logf("Has ref? %v. Expected: %v. Actual: %v.", expectedBuildRefPresence == machineConfigPoolHasBuildRef(targetPool), expectedBuildRefPresence, machineConfigPoolHasBuildRef(targetPool)) + t.Logf("Has MachineConfig refs? %v", machineConfigPoolHasMachineConfigRefs(targetPool)) + return false + } + } + + // Find out what happened to the build and its objects. + _, err := cs.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Get(ctx, buildName, metav1.GetOptions{}) + switch endingPhase { + case buildv1.BuildPhaseComplete: + // If the build pod was successful, looking it up should fail because it should have been deleted. + outcome = assert.Error(t, err) + default: + // If the build pod failed, looking it up should succeed since we leave it around for debugging. + outcome = assert.NoError(t, err) + } + + return outcome +} + +// Simulates a pod being scheduled and reaching various states. Verifies that +// the target MachineConfigPool reaches the expected states as it goes. +func assertMCPFollowsBuildPodStatus(ctx context.Context, t *testing.T, cs *Clients, mcp *mcfgv1.MachineConfigPool, endingPhase corev1.PodPhase) bool { //nolint:unparam // This param is actually used. + t.Helper() + + var outcome bool + + defer func() { + assert.True(t, outcome) + }() + + // Each of the various pod phases we're interested in. + podPhases := []corev1.PodPhase{ + corev1.PodPending, + corev1.PodRunning, + endingPhase, + } + + // Each pod phase is correllated to a MachineConfigPoolConditionType. + podPhaseToMCPCondition := map[corev1.PodPhase]mcfgv1.MachineConfigPoolConditionType{ + corev1.PodPending: mcfgv1.MachineConfigPoolBuildPending, + corev1.PodRunning: mcfgv1.MachineConfigPoolBuilding, + corev1.PodFailed: mcfgv1.MachineConfigPoolBuildFailed, + corev1.PodSucceeded: mcfgv1.MachineConfigPoolBuildSuccess, + } + + // Determine if the MachineConfigPool should have a reference to the build pod. 
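+	// A reference is expected in every phase except success; on success the
+	// pod is cleaned up, so the reference goes away with it.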
+ shouldHaveBuildPodRef := map[corev1.PodPhase]bool{ + corev1.PodPending: true, + corev1.PodRunning: true, + corev1.PodFailed: true, + corev1.PodSucceeded: false, + } + + ibr := newImageBuildRequest(mcp) + buildPodName := ibr.getBuildName() + + // Wait for the build pod to be created. + outcome = assertBuildPodIsCreated(ctx, t, cs, ibr) + if !outcome { + return outcome + } + + outcome = assertConfigMapsCreated(ctx, t, cs, ibr) + if !outcome { + return false + } + + // Cycle through each of the build pod phases. + for _, phase := range podPhases { + // Get the build pod by name. + buildPod, err := cs.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Get(ctx, buildPodName, metav1.GetOptions{}) + require.NoError(t, err) + + // Set the pod phase and update it. + buildPod.Status.Phase = phase + + // TODO: Figure out how to set / get the image pullspec from the build pod. + _, err = cs.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Update(ctx, buildPod, metav1.UpdateOptions{}) + require.NoError(t, err) + + // Look up the expected MCP condition for our current pod phase. + expectedMCPCondition := podPhaseToMCPCondition[phase] + + // Look up the expected build pod condition for our current pod phase. + expectedBuildPodRefPresence := shouldHaveBuildPodRef[phase] + + var targetPool *mcfgv1.MachineConfigPool + + // Wait for the MCP condition to reach the expected state. + outcome = assertMachineConfigPoolReachesState(ctx, t, cs, mcp.Name, func(mcp *mcfgv1.MachineConfigPool) bool { + targetPool = mcp + return mcfgv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, expectedMCPCondition) && + expectedBuildPodRefPresence == machineConfigPoolHasBuildRef(mcp) && + machineConfigPoolHasMachineConfigRefs(mcp) + }) + + if !outcome { + spew.Dump(targetPool) + t.Logf("Has expected condition (%s) for phase (%s)? %v", expectedMCPCondition, phase, mcfgv1.IsMachineConfigPoolConditionTrue(targetPool.Status.Conditions, expectedMCPCondition)) + t.Logf("Has ref? %v. Expected: %v. Actual: %v.", expectedBuildPodRefPresence == machineConfigPoolHasBuildRef(targetPool), expectedBuildPodRefPresence, machineConfigPoolHasBuildRef(targetPool)) + t.Logf("Has MachineConfig refs? %v", machineConfigPoolHasMachineConfigRefs(targetPool)) + return false + } + } + + // Find out what happened to the build pod. + _, err := cs.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Get(ctx, buildPodName, metav1.GetOptions{}) + switch endingPhase { + case corev1.PodSucceeded: + // If the build pod was successful, looking it up should fail because it should have been deleted. + outcome = assert.Error(t, err) + case corev1.PodFailed: + // If the build pod failed, looking it up should succeed since we leave it around for debugging. + outcome = assert.NoError(t, err) + } + + return outcome +} + +// Dumps all the objects within each of the fake clients to a YAML file for easy debugging. 
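+// One file per object kind is written, named <prefix>-<kind>.yaml; the calls
+// that would wire this up from runTestFuncs are currently commented out.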
+func dumpObjects(ctx context.Context, t *testing.T, cs *Clients, filenamePrefix string) { + if cs.mcfgclient != nil { + mcp, err := cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + + dumpToYAMLFile(t, mcp, filenamePrefix+"-machineconfigpools.yaml") + + machineconfigs, err := cs.mcfgclient.MachineconfigurationV1().MachineConfigs().List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + + dumpToYAMLFile(t, machineconfigs, filenamePrefix+"-machineconfigs.yaml") + } + + if cs.kubeclient != nil { + pods, err := cs.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + + dumpToYAMLFile(t, pods, filenamePrefix+"-pods.yaml") + + configmaps, err := cs.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + dumpToYAMLFile(t, configmaps, filenamePrefix+"-configmaps.yaml") + } + + if cs.buildclient != nil { + buildconfigs, err := cs.buildclient.BuildV1().BuildConfigs(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + + dumpToYAMLFile(t, buildconfigs, filenamePrefix+"-buildconfigs.yaml") + + builds, err := cs.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + dumpToYAMLFile(t, builds, filenamePrefix+"-builds.yaml") + } +} + +// Dumps the provided object to the given filename. +func dumpToYAMLFile(t *testing.T, obj interface{}, filename string) { + out, err := yaml.Marshal(obj) + require.NoError(t, err) + + filename = strings.ReplaceAll(filename, "/", "_") + + require.NoError(t, ioutil.WriteFile(filename, out, 0755)) +} diff --git a/pkg/controller/build/helpers.go b/pkg/controller/build/helpers.go new file mode 100644 index 0000000000..b51b314e5f --- /dev/null +++ b/pkg/controller/build/helpers.go @@ -0,0 +1,252 @@ +package build + +import ( + "bytes" + "compress/gzip" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "time" + + "github.com/containers/image/v5/docker/reference" + imagetypes "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + k8stypes "k8s.io/apimachinery/pkg/types" +) + +const ( + canonicalSecretSuffix string = "-canonical" +) + +// Compresses and base-64 encodes a given byte array. Ideal for loading an +// arbitrary byte array into a ConfigMap or Secret. +func compressAndEncode(payload []byte) (*bytes.Buffer, error) { + out := bytes.NewBuffer(nil) + + if len(payload) == 0 { + return out, nil + } + + // We need to base64-encode our gzipped data so we can marshal it in and out + // of a string since ConfigMaps and Secrets expect a textual representation. 
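+	// Note the explicit Close further down: the base64 encoder buffers partial
+	// groups, and closing it is what flushes the final padding into the output
+	// buffer.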
+ base64Enc := base64.NewEncoder(base64.StdEncoding, out) + defer base64Enc.Close() + + err := compress(bytes.NewBuffer(payload), base64Enc) + if err != nil { + return nil, fmt.Errorf("could not compress and encode payload: %w", err) + } + + err = base64Enc.Close() + if err != nil { + return nil, fmt.Errorf("could not close base64 encoder: %w", err) + } + + return out, err +} + +// Compresses a given io.Reader to a given io.Writer +func compress(r io.Reader, w io.Writer) error { + gz, err := gzip.NewWriterLevel(w, gzip.BestCompression) + if err != nil { + return fmt.Errorf("could not initialize gzip writer: %w", err) + } + + defer gz.Close() + + if _, err := io.Copy(gz, r); err != nil { + return fmt.Errorf("could not compress payload: %w", err) + } + + if err := gz.Close(); err != nil { + return fmt.Errorf("could not close gzipwriter: %w", err) + } + + return nil +} + +// Parses the output of `$ skopeo inspect +// docker://registry.hostname/org/repo:latest` into a struct to get the image +// pullspec and digest. +func parseSkopeoOutputIntoImagePullspec(skopeoBytes []byte) (string, error) { + // Copy / pasta'ed from: https://github.com/containers/skopeo/blob/main/cmd/skopeo/inspect/output.go + type skopeoOutput struct { + Name string `json:",omitempty"` + Tag string `json:",omitempty"` + Digest digest.Digest + RepoTags []string + Created *time.Time + DockerVersion string + Labels map[string]string + Architecture string + Os string + Layers []string + LayersData []imagetypes.ImageInspectLayer + Env []string + } + + out := &skopeoOutput{} + + if err := json.Unmarshal(skopeoBytes, out); err != nil { + return "", err + } + + return parseImagePullspecWithDigest(out.Name, out.Digest) +} + +// Replaces any tags on the image pullspec with the provided image digest. +func parseImagePullspecWithDigest(pullspec string, imageDigest digest.Digest) (string, error) { + named, err := reference.ParseNamed(pullspec) + if err != nil { + return "", err + } + + canonical, err := reference.WithDigest(reference.TrimNamed(named), imageDigest) + if err != nil { + return "", err + } + + return canonical.String(), nil +} + +// Parses an image pullspec from a string and an image SHA and replaces any +// tags on the pullspec with the provided image SHA. +func parseImagePullspec(pullspec, imageSHA string) (string, error) { + imageDigest, err := digest.Parse(imageSHA) + if err != nil { + return "", err + } + + return parseImagePullspecWithDigest(pullspec, imageDigest) +} + +// Converts a legacy Docker pull secret into a more modern representation. +// Essentially, it converts {"registry.hostname.com": {"username": "user"...}} +// into {"auths": {"registry.hostname.com": {"username": "user"...}}}. If it +// encounters a pull secret already in this configuration, it will return the +// input secret as-is. Returns either the supplied data or the newly-configured +// representation of said data, a boolean to indicate whether it was converted, +// and any errors resulting from the conversion process. +func canonicalizePullSecretBytes(secretBytes []byte) ([]byte, bool, error) { + type newStyleAuth struct { + Auths map[string]interface{} `json:"auths,omitempty"` + } + + // Try marshaling the new-style secret first: + newStyleDecoded := &newStyleAuth{} + if err := json.Unmarshal(secretBytes, newStyleDecoded); err != nil { + return nil, false, fmt.Errorf("could not decode new-style pull secret: %w", err) + } + + // We have an new-style secret, so we can just return here. 
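+	// ("New-style" simply means the credentials already sit under an "auths"
+	// key, in which case the bytes are returned untouched.)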
+ if len(newStyleDecoded.Auths) != 0 { + return secretBytes, false, nil + } + + // We need to convert the legacy-style secret to the new-style. + oldStyleDecoded := map[string]interface{}{} + if err := json.Unmarshal(secretBytes, &oldStyleDecoded); err != nil { + return nil, false, fmt.Errorf("could not decode legacy-style pull secret: %w", err) + } + + out, err := json.Marshal(&newStyleAuth{ + Auths: oldStyleDecoded, + }) + + return out, err == nil, err +} + +// Performs the above operation upon a given secret, potentially creating a new +// secret for insertion with the suffix '-canonical' on its name. +func canonicalizePullSecret(secret *corev1.Secret) (*corev1.Secret, error) { + secret = secret.DeepCopy() + + key, err := getPullSecretKey(secret) + if err != nil { + return nil, err + } + + secretBytes, ok := secret.Data[key] + if !ok { + return nil, fmt.Errorf("could not locate key %q in %s", key, secret.Name) + } + + canonicalizedSecretBytes, canonicalized, err := canonicalizePullSecretBytes(secretBytes) + if err != nil { + return nil, err + } + + if !canonicalized { + return secret, nil + } + + out := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s%s", secret.Name, canonicalSecretSuffix), + Namespace: secret.Namespace, + }, + Data: map[string][]byte{ + corev1.DockerConfigJsonKey: canonicalizedSecretBytes, + }, + Type: corev1.SecretTypeDockerConfigJson, + } + + return out, nil +} + +// Looks up a given secret key for a given secret type and validates that the +// key is present and the secret is a non-zero length. Returns an error if it +// is the incorrect secret type, missing the appropriate key, or the secret is +// a zero-length. +func getPullSecretKey(secret *corev1.Secret) (string, error) { + if secret.Type != corev1.SecretTypeDockerConfigJson && secret.Type != corev1.SecretTypeDockercfg { + return "", fmt.Errorf("unknown secret type %s", secret.Type) + } + + secretTypes := map[corev1.SecretType]string{ + corev1.SecretTypeDockercfg: corev1.DockerConfigKey, + corev1.SecretTypeDockerConfigJson: corev1.DockerConfigJsonKey, + } + + key := secretTypes[secret.Type] + + val, ok := secret.Data[key] + if !ok { + return "", fmt.Errorf("missing %q in %s", key, secret.Name) + } + + if len(val) == 0 { + return "", fmt.Errorf("empty value %q in %s", key, secret.Name) + } + + return key, nil +} + +// Converts a given Kube object into an object reference. +func toObjectRef(obj interface { + GetName() string + GetNamespace() string + GetUID() k8stypes.UID + GetObjectKind() schema.ObjectKind +}) *corev1.ObjectReference { + return &corev1.ObjectReference{ + Kind: obj.GetObjectKind().GroupVersionKind().Kind, + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + UID: obj.GetUID(), + } +} + +// Returns any supplied error except ones that match k8serrors.IsNotFound(). +func ignoreIsNotFoundErr(err error) error { + if err != nil && !k8serrors.IsNotFound(err) { + return err + } + + return nil +} diff --git a/pkg/controller/build/helpers_test.go b/pkg/controller/build/helpers_test.go new file mode 100644 index 0000000000..18cdfe11b4 --- /dev/null +++ b/pkg/controller/build/helpers_test.go @@ -0,0 +1,163 @@ +package build + +import ( + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Tests that a given image pullspec with a tag and SHA is correctly substituted. 
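As a concrete illustration of that substitution (hypothetical pullspec; the actual fixture constants are defined elsewhere in this series), the helper turns a tag reference plus a digest into a digest-pinned reference:

    // Hypothetical input values; the test below uses fixture constants instead.
    pinned, err := parseImagePullspec(
        "registry.example.com/org/os-image:latest",
        "sha256:c2a723564f370e80df76f8355c410934fa0b274f406e5cbdc22075f796b63f4e",
    )
    // err == nil
    // pinned == "registry.example.com/org/os-image@sha256:c2a723564f370e80df76f8355c410934fa0b274f406e5cbdc22075f796b63f4e"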
+func TestParseImagePullspec(t *testing.T) { + t.Parallel() + + out, err := parseImagePullspec(expectedImagePullspecWithTag, expectedImageSHA) + assert.NoError(t, err) + assert.Equal(t, expectedImagePullspecWithSHA, out) +} + +// Tests that Skopeo output is correctly parsed. For brevity, I did not include the full output. +func TestParseSkopeoOutput(t *testing.T) { + t.Parallel() + + skopeoOutput := `{ + "Name": "quay.io/zzlotnik/testing", + "Digest": "sha256:c2a723564f370e80df76f8355c410934fa0b274f406e5cbdc22075f796b63f4e" + }` + + out, err := parseSkopeoOutputIntoImagePullspec([]byte(skopeoOutput)) + assert.NoError(t, err) + assert.Equal(t, "quay.io/zzlotnik/testing@sha256:c2a723564f370e80df76f8355c410934fa0b274f406e5cbdc22075f796b63f4e", out) +} + +// Tests that pull secrets are canonicalized. In other words, converted from +// the legacy-style pull secret to the new-style secret. +func TestCanonicalizePullSecret(t *testing.T) { + t.Parallel() + + legacySecret := `{"registry.hostname.com": {"username": "user", "password": "s3kr1t", "auth": "s00pers3kr1t", "email": "user@hostname.com"}}` + + newSecret := `{"auths":` + legacySecret + `}` + + testCases := []struct { + name string + inputSecret *corev1.Secret + expectCanonical bool + expectError bool + }{ + { + name: "new-style secret dockerconfigjson", + inputSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pull-secret", + }, + Data: map[string][]byte{ + corev1.DockerConfigJsonKey: []byte(newSecret), + }, + Type: corev1.SecretTypeDockerConfigJson, + }, + expectCanonical: false, + }, + { + name: "new-style secret dockercfg", + inputSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pull-secret", + }, + Data: map[string][]byte{ + corev1.DockerConfigKey: []byte(newSecret), + }, + Type: corev1.SecretTypeDockercfg, + }, + expectCanonical: false, + }, + { + name: "legacy secret dockercfg", + inputSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pull-secret", + }, + Data: map[string][]byte{ + corev1.DockerConfigKey: []byte(legacySecret), + }, + Type: corev1.SecretTypeDockercfg, + }, + expectCanonical: true, + }, + { + name: "legacy secret dockerconfigjson", + inputSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pull-secret", + }, + Data: map[string][]byte{ + corev1.DockerConfigJsonKey: []byte(legacySecret), + }, + Type: corev1.SecretTypeDockerConfigJson, + }, + expectCanonical: true, + }, + { + name: "empty secret", + inputSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pull-secret", + }, + Data: map[string][]byte{ + corev1.DockerConfigKey: {}, + }, + Type: corev1.SecretTypeDockercfg, + }, + expectError: true, + }, + { + name: "unknown key secret", + inputSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pull-secret", + }, + Data: map[string][]byte{ + "unknown-key": []byte(newSecret), + }, + }, + expectError: true, + }, + { + name: "unknown secret type", + inputSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pull-secret", + }, + Data: map[string][]byte{ + corev1.DockerConfigKey: []byte(newSecret), + }, + Type: corev1.SecretTypeOpaque, + }, + expectError: true, + }, + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + out, err := canonicalizePullSecret(testCase.inputSecret) + if testCase.expectError { + assert.Error(t, err) + return + } else { + assert.NoError(t, err) + } + + if testCase.expectCanonical { + assert.Contains(t, out.Name, "canonical") + } + 
+ for _, val := range out.Data { + assert.JSONEq(t, newSecret, string(val)) + } + }) + } +} diff --git a/pkg/controller/build/image_build_controller.go b/pkg/controller/build/image_build_controller.go new file mode 100644 index 0000000000..ad272c06b4 --- /dev/null +++ b/pkg/controller/build/image_build_controller.go @@ -0,0 +1,335 @@ +package build + +import ( + "context" + "fmt" + "strings" + "time" + + buildv1 "github.com/openshift/api/build/v1" + buildlistersv1 "github.com/openshift/client-go/build/listers/build/v1" + mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/scheme" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + coreclientsetv1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" +) + +// Controller defines the build controller. +type ImageBuildController struct { + *Clients + *informers + + eventRecorder record.EventRecorder + + // The function to call whenever we've encountered a Build. This function is + // responsible for examining the Build to determine what state its in and map + // that state to the appropriate MachineConfigPool object. + buildHandler func(*buildv1.Build) error + + syncHandler func(pod string) error + enqueueBuild func(*buildv1.Build) + + buildLister buildlistersv1.BuildLister + + buildListerSynced cache.InformerSynced + + queue workqueue.RateLimitingInterface + + config BuildControllerConfig +} + +var _ ImageBuilder = (*ImageBuildController)(nil) + +// Returns a new image build controller. +func newImageBuildController( + ctrlConfig BuildControllerConfig, + clients *Clients, + buildHandler func(*buildv1.Build) error, +) *ImageBuildController { + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(klog.Infof) + eventBroadcaster.StartRecordingToSink(&coreclientsetv1.EventSinkImpl{Interface: clients.kubeclient.CoreV1().Events("")}) + + ctrl := &ImageBuildController{ + Clients: clients, + informers: newInformers(clients), + eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "machineosbuilder-imagebuildcontroller"}), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "machineosbuilder-imagebuildcontroller"), + config: ctrlConfig, + buildHandler: buildHandler, + } + + // As an aside, why doesn't the constructor here set up all the informers? 
+ ctrl.buildInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: ctrl.addBuild, + UpdateFunc: ctrl.updateBuild, + DeleteFunc: ctrl.deleteBuild, + }) + + ctrl.buildLister = ctrl.buildInformer.Lister() + ctrl.buildListerSynced = ctrl.buildInformer.Informer().HasSynced + + ctrl.syncHandler = ctrl.syncBuild + ctrl.enqueueBuild = ctrl.enqueueDefault + + return ctrl +} + +func (ctrl *ImageBuildController) enqueue(build *buildv1.Build) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(build) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", build, err)) + return + } + + ctrl.queue.Add(key) +} + +func (ctrl *ImageBuildController) enqueueRateLimited(build *buildv1.Build) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(build) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", build, err)) + return + } + + ctrl.queue.AddRateLimited(key) +} + +// enqueueAfter will enqueue a build after the provided amount of time. +func (ctrl *ImageBuildController) enqueueAfter(build *buildv1.Build, after time.Duration) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(build) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", build, err)) + return + } + + ctrl.queue.AddAfter(key, after) +} + +// enqueueDefault calls a default enqueue function +func (ctrl *ImageBuildController) enqueueDefault(build *buildv1.Build) { + ctrl.enqueueAfter(build, ctrl.config.UpdateDelay) +} + +// Syncs Builds. +func (ctrl *ImageBuildController) syncBuild(key string) error { //nolint:dupl // This does have commonality with the PodBuildController. + start := time.Now() + defer func() { + klog.Infof("Finished syncing pod %s: %s", key, time.Since(start)) + }() + klog.Infof("Started syncing pod %s", key) + + _, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return err + } + // TODO: Why do I need to set the namespace here? + build, err := ctrl.buildLister.Builds(ctrlcommon.MCONamespace).Get(name) + if k8serrors.IsNotFound(err) { + klog.V(2).Infof("Build %v has been deleted", key) + return nil + } + if err != nil { + return err + } + + build, err = ctrl.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Get(context.TODO(), build.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + if !hasAllRequiredOSBuildLabels(build.Labels) { + klog.Infof("Ignoring non-OS image build %s", build.Name) + return nil + } + + if err := ctrl.buildHandler(build); err != nil { + return fmt.Errorf("unable to update with build status: %w", err) + } + + klog.Infof("Updated MachineConfigPool with build status. Build %s in %s", build.Name, build.Status.Phase) + + return nil +} + +// Starts the Image Build Controller. +func (ctrl *ImageBuildController) Run(ctx context.Context, workers int) { + defer utilruntime.HandleCrash() + defer ctrl.queue.ShutDown() + + ctrl.informers.start(ctx) + + if !cache.WaitForCacheSync(ctx.Done(), ctrl.buildListerSynced) { + return + } + + klog.Info("Starting MachineOSBuilder-ImageBuildController") + defer klog.Info("Shutting down MachineOSBuilder-ImageBuildController") + + for i := 0; i < workers; i++ { + go wait.Until(ctrl.worker, time.Second, ctx.Done()) + } + + <-ctx.Done() +} + +// Gets the final image pullspec. In this case, we can interrogate the Build +// object for this information. 
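FinalPullspec below is one of four methods through which the parent controller drives an image build; both this controller and the PodBuildController assert that they satisfy the ImageBuilder interface. The interface definition is not part of this hunk, but judging from the methods implemented here it is roughly:

    // Assumed shape, inferred from the methods below; the actual definition
    // lives elsewhere in this series.
    type ImageBuilder interface {
        StartBuild(ImageBuildRequest) (*corev1.ObjectReference, error)
        IsBuildRunning(*mcfgv1.MachineConfigPool) (bool, error)
        DeleteBuildObject(*mcfgv1.MachineConfigPool) error
        FinalPullspec(*mcfgv1.MachineConfigPool) (string, error)
    }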
+func (ctrl *ImageBuildController) FinalPullspec(pool *mcfgv1.MachineConfigPool) (string, error) { + buildName := newImageBuildRequest(pool).getBuildName() + + build, err := ctrl.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Get(context.TODO(), buildName, metav1.GetOptions{}) + if err != nil { + return "", fmt.Errorf("could not get build %s for pool %s: %w", buildName, pool.Name, err) + } + + // Get the image digest from the completed build and replace the tag with + // the digest. + if build.Status.OutputDockerImageReference == "" { + return "", fmt.Errorf("no image reference outputted") + } + + if build.Status.Output.To.ImageDigest == "" { + return "", fmt.Errorf("no image digest found") + } + + return parseImagePullspec(build.Status.OutputDockerImageReference, build.Status.Output.To.ImageDigest) +} + +// Deletes the underlying Build object. +func (ctrl *ImageBuildController) DeleteBuildObject(pool *mcfgv1.MachineConfigPool) error { + buildName := newImageBuildRequest(pool).getBuildName() + return ctrl.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Delete(context.TODO(), buildName, metav1.DeleteOptions{}) +} + +// Determines if a build is currently running by looking for a corresponding Build. +func (ctrl *ImageBuildController) IsBuildRunning(pool *mcfgv1.MachineConfigPool) (bool, error) { + buildName := newImageBuildRequest(pool).getBuildName() + + // First check if we have a build in progress for this MachineConfigPool and rendered config. + _, err := ctrl.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Get(context.TODO(), buildName, metav1.GetOptions{}) + if err != nil && !k8serrors.IsNotFound(err) { + return false, err + } + + return err == nil, nil +} + +// Starts a new build, assuming one is not found first. In that case, it +// returns an object reference to the preexisting Build object. +func (ctrl *ImageBuildController) StartBuild(ibr ImageBuildRequest) (*corev1.ObjectReference, error) { + targetMC := ibr.Pool.Spec.Configuration.Name + + buildName := ibr.getBuildName() + + // TODO: Find a constant for this: + if !strings.HasPrefix(targetMC, "rendered-") { + return nil, fmt.Errorf("%s is not a rendered MachineConfig", targetMC) + } + + // First check if we have a build in progress for this MachineConfigPool and rendered config. + build, err := ctrl.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Get(context.TODO(), buildName, metav1.GetOptions{}) + if err != nil && !k8serrors.IsNotFound(err) { + return nil, err + } + + // This means we found a preexisting build build. + if build != nil && err == nil && hasAllRequiredOSBuildLabels(build.Labels) { + klog.Infof("Found preexisting OS image build (%s) for pool %s", build.Name, ibr.Pool.Name) + return toObjectRef(build), nil + } + + klog.Infof("Starting build for pool %s", ibr.Pool.Name) + klog.Infof("Build name: %s", buildName) + klog.Infof("Final image will be pushed to %q, using secret %q", ibr.FinalImage.Pullspec, ibr.FinalImage.PullSecret.Name) + + build, err = ctrl.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Create(context.TODO(), ibr.toBuild(), metav1.CreateOptions{}) + if err != nil { + return nil, fmt.Errorf("could not create OS image build: %w", err) + } + + klog.Infof("Build started for pool %s in %s!", ibr.Pool.Name, build.Name) + + return toObjectRef(build), nil +} + +// Fires whenever a Build is added. +func (ctrl *ImageBuildController) addBuild(obj interface{}) { + build := obj.(*buildv1.Build).DeepCopy() + klog.V(4).Infof("Adding Build %s. Is OS image build? 
%v", build.Name, hasAllRequiredOSBuildLabels(build.Labels)) + if hasAllRequiredOSBuildLabels(build.Labels) { + ctrl.enqueueBuild(build) + } +} + +// Fires whenever a Build is updated. +func (ctrl *ImageBuildController) updateBuild(_, curObj interface{}) { + curBuild := curObj.(*buildv1.Build).DeepCopy() + + isOSImageBuild := hasAllRequiredOSBuildLabels(curBuild.Labels) + + klog.Infof("Updating build %s. Is OS image build? %v", curBuild.Name, isOSImageBuild) + + // Ignore non-OS image builds. + // TODO: Figure out if we can add the filter criteria onto the lister. + if !isOSImageBuild { + return + } + + klog.Infof("Build %s updated", curBuild.Name) + + ctrl.enqueueBuild(curBuild) +} + +func (ctrl *ImageBuildController) handleErr(err error, key interface{}) { + if err == nil { + ctrl.queue.Forget(key) + return + } + + if ctrl.queue.NumRequeues(key) < ctrl.config.MaxRetries { + klog.V(2).Infof("Error syncing build %v: %v", key, err) + ctrl.queue.AddRateLimited(key) + return + } + + utilruntime.HandleError(err) + klog.V(2).Infof("Dropping build %q out of the queue: %v", key, err) + ctrl.queue.Forget(key) + ctrl.queue.AddAfter(key, 1*time.Minute) +} + +func (ctrl *ImageBuildController) deleteBuild(obj interface{}) { + build := obj.(*buildv1.Build).DeepCopy() + klog.V(4).Infof("Deleting Build %s. Is OS image build? %v", build.Name, hasAllRequiredOSBuildLabels(build.Labels)) + ctrl.enqueueBuild(build) +} + +// worker runs a worker thread that just dequeues items, processes them, and marks them done. +// It enforces that the syncHandler is never invoked concurrently with the same key. +func (ctrl *ImageBuildController) worker() { + for ctrl.processNextWorkItem() { + } +} + +func (ctrl *ImageBuildController) processNextWorkItem() bool { + key, quit := ctrl.queue.Get() + if quit { + return false + } + defer ctrl.queue.Done(key) + + err := ctrl.syncHandler(key.(string)) + ctrl.handleErr(err, key) + + return true +} diff --git a/pkg/controller/build/image_build_request.go b/pkg/controller/build/image_build_request.go new file mode 100644 index 0000000000..3c5b568b24 --- /dev/null +++ b/pkg/controller/build/image_build_request.go @@ -0,0 +1,423 @@ +package build + +import ( + _ "embed" + "encoding/json" + "fmt" + "strings" + "text/template" + + buildv1 "github.com/openshift/api/build/v1" + mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + mcPoolAnnotation string = "machineconfiguration.openshift.io/pool" + machineConfigJSONFilename string = "machineconfig.json.gz" + buildahImagePullspec string = "quay.io/buildah/stable:latest" + skopeoImagePullspec string = "quay.io/skopeo/stable:latest" +) + +//go:embed assets/Dockerfile.on-cluster-build-template +var dockerfileTemplate string + +//go:embed assets/wait.sh +var waitScript string + +//go:embed assets/build.sh +var buildScript string + +// Represents a given image pullspec and the location of the pull secret. +type ImageInfo struct { + // The pullspec for a given image (e.g., registry.hostname.com/orp/repo:tag) + Pullspec string + // The name of the K8s secret required for pulling the aforementioned image. + PullSecret corev1.LocalObjectReference +} + +// Represents the request to build a layered OS image. 
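For orientation, a fully populated request might look roughly like the following (all values hypothetical); the struct itself, and the constructors that populate it from ConfigMaps, are defined immediately below.

    req := ImageBuildRequest{
        Pool:            pool.DeepCopy(), // the target *mcfgv1.MachineConfigPool
        BaseImage:       ImageInfo{Pullspec: "registry.example.com/rhel-coreos:latest", PullSecret: corev1.LocalObjectReference{Name: "base-image-pull-secret"}},
        ExtensionsImage: ImageInfo{Pullspec: "registry.example.com/rhel-coreos-extensions:latest", PullSecret: corev1.LocalObjectReference{Name: "base-image-pull-secret"}},
        FinalImage:      ImageInfo{Pullspec: "registry.example.com/os-images/worker:latest", PullSecret: corev1.LocalObjectReference{Name: "final-image-push-secret"}},
        ReleaseVersion:  "4.12.0",
    }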
+type ImageBuildRequest struct { + // The target MachineConfigPool + Pool *mcfgv1.MachineConfigPool + // The base OS image (derived from the machine-config-osimageurl ConfigMap) + BaseImage ImageInfo + // The extensions image (derived from the machine-config-osimageurl ConfigMap) + ExtensionsImage ImageInfo + // The final OS image (desired from the on-cluster-build-config ConfigMap) + FinalImage ImageInfo + // The OpenShift release version (derived from the machine-config-osimageurl ConfigMap) + ReleaseVersion string +} + +// Constructs a simple ImageBuildRequest. +func newImageBuildRequest(pool *mcfgv1.MachineConfigPool) ImageBuildRequest { + return ImageBuildRequest{ + Pool: pool.DeepCopy(), + } +} + +// Populates the final image info from the on-cluster-build-config ConfigMap. +func newFinalImageInfo(onClusterBuildConfigMap *corev1.ConfigMap) ImageInfo { + return ImageInfo{ + Pullspec: onClusterBuildConfigMap.Data[finalImagePullspecConfigKey], + PullSecret: corev1.LocalObjectReference{ + Name: onClusterBuildConfigMap.Data[finalImagePushSecretNameConfigKey], + }, + } +} + +// Populates the base image info from both the on-cluster-build-config and +// machine-config-osimageurl ConfigMaps. +func newBaseImageInfo(osImageURLConfigMap, onClusterBuildConfigMap *corev1.ConfigMap) ImageInfo { + return ImageInfo{ + Pullspec: osImageURLConfigMap.Data[baseOSContainerImageConfigKey], + PullSecret: corev1.LocalObjectReference{ + Name: onClusterBuildConfigMap.Data[baseImagePullSecretNameConfigKey], + }, + } +} + +// Populates the extensions image info from both the on-cluster-build-config +// and machine-config-osimageurl ConfigMaps. +func newExtensionsImageInfo(osImageURLConfigMap, onClusterBuildConfigMap *corev1.ConfigMap) ImageInfo { + return ImageInfo{ + Pullspec: osImageURLConfigMap.Data[baseOSExtensionsContainerImageConfigKey], + PullSecret: corev1.LocalObjectReference{ + Name: onClusterBuildConfigMap.Data[baseImagePullSecretNameConfigKey], + }, + } +} + +// Constructs an ImageBuildRequest with all of the images populated from ConfigMaps +func newImageBuildRequestWithConfigMap(pool *mcfgv1.MachineConfigPool, osImageURLConfigMap, onClusterBuildConfigMap *corev1.ConfigMap) ImageBuildRequest { + return ImageBuildRequest{ + Pool: pool.DeepCopy(), + BaseImage: newBaseImageInfo(osImageURLConfigMap, onClusterBuildConfigMap), + FinalImage: newFinalImageInfo(onClusterBuildConfigMap), + ExtensionsImage: newExtensionsImageInfo(osImageURLConfigMap, onClusterBuildConfigMap), + ReleaseVersion: osImageURLConfigMap.Data[releaseVersionConfigKey], + } +} + +// Renders our Dockerfile and injects it into a ConfigMap for consumption by the image builder. +func (i ImageBuildRequest) dockerfileToConfigMap() (*corev1.ConfigMap, error) { + dockerfile, err := i.renderDockerfile() + if err != nil { + return nil, err + } + + configmap := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: i.getObjectMeta(i.getDockerfileConfigMapName()), + Data: map[string]string{ + "Dockerfile": dockerfile, + }, + } + + return configmap, nil +} + +// Stuffs a given MachineConfig into a ConfigMap, gzipping and base64-encoding it. +func (i ImageBuildRequest) toConfigMap(mc *mcfgv1.MachineConfig) (*corev1.ConfigMap, error) { + out, err := json.Marshal(mc) + if err != nil { + return nil, fmt.Errorf("could not encode MachineConfig %s: %w", mc.Name, err) + } + + // TODO: Check for size here and determine if its too big. ConfigMaps and + // Secrets have a size limit of 1 MB. 
Compressing and encoding the + // MachineConfig provides us with additional headroom. However, if the + // MachineConfig grows large enough, we may need to do something more + // involved. + compressed, err := compressAndEncode(out) + if err != nil { + return nil, fmt.Errorf("could not compress or encode MachineConfig %s: %w", mc.Name, err) + } + + configmap := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: i.getObjectMeta(i.getMCConfigMapName()), + Data: map[string]string{ + machineConfigJSONFilename: compressed.String(), + }, + } + + return configmap, nil +} + +// Renders our Dockerfile template. +// +// TODO: Figure out how to parse the Dockerfile using +// https://github.com/openshift/imagebuilder/tree/master/dockerfile/parser to +// ensure that we've generated a valid Dockerfile. +// +// TODO: Figure out how to programatically generate the Dockerfile using a +// higher-level abstraction than just naïvely rendering a text template and +// hoping for the best. +func (i ImageBuildRequest) renderDockerfile() (string, error) { + tmpl, err := template.New("dockerfile").Parse(dockerfileTemplate) + if err != nil { + return "", err + } + + out := &strings.Builder{} + + if err := tmpl.Execute(out, i); err != nil { + return "", err + } + + return out.String(), nil +} + +// Creates an OpenShift Image Builder build object prewired with all ConfigMaps +// / Secrets / etc. +func (i ImageBuildRequest) toBuild() *buildv1.Build { + skipLayers := buildv1.ImageOptimizationSkipLayers + + // The Build API requires the Dockerfile field to be set, even if you + // override it via a ConfigMap. + dockerfile := "FROM scratch" + + return &buildv1.Build{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: i.getObjectMeta(i.getBuildName()), + Spec: buildv1.BuildSpec{ + CommonSpec: buildv1.CommonSpec{ + // TODO: We may need to configure a Build Input here so we can wire up + // the pull secrets for the base OS image and the extensions image. + Source: buildv1.BuildSource{ + Type: buildv1.BuildSourceDockerfile, + Dockerfile: &dockerfile, + ConfigMaps: []buildv1.ConfigMapBuildSource{ + { + // Provides the rendered MachineConfig in a gzipped / + // base64-encoded format. + ConfigMap: corev1.LocalObjectReference{ + Name: i.getMCConfigMapName(), + }, + DestinationDir: "machineconfig", + }, + { + // Provides the rendered Dockerfile. + ConfigMap: corev1.LocalObjectReference{ + Name: i.getDockerfileConfigMapName(), + }, + }, + }, + }, + Strategy: buildv1.BuildStrategy{ + DockerStrategy: &buildv1.DockerBuildStrategy{ + // Squashing layers is good as long as it doesn't cause problems with what + // the users want to do. It says "some syntax is not supported" + ImageOptimizationPolicy: &skipLayers, + }, + Type: buildv1.DockerBuildStrategyType, + }, + Output: buildv1.BuildOutput{ + To: &corev1.ObjectReference{ + Name: i.FinalImage.Pullspec, + Kind: "DockerImage", + }, + PushSecret: &i.FinalImage.PullSecret, + ImageLabels: []buildv1.ImageLabel{ + {Name: "io.openshift.machineconfig.pool", Value: i.Pool.Name}, + }, + }, + }, + }, + } +} + +// Creates a custom image build pod to build the final OS image with all +// ConfigMaps / Secrets / etc. wired into it. 
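Regarding the size-limit TODO in toConfigMap above, one possible shape for that guard is sketched here (the exact limit and headroom policy are open questions):

    // Sketch only: reject payloads that would not fit in a ConfigMap even
    // after gzip + base64. The 1 MiB figure matches the limit noted above.
    if compressed.Len() > 1024*1024 {
        return nil, fmt.Errorf("MachineConfig %s is still %d bytes after compression, too large for a ConfigMap", mc.Name, compressed.Len())
    }

The custom build pod described by the preceding comment is assembled next.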
+func (i ImageBuildRequest) toBuildPod() *corev1.Pod { + env := []corev1.EnvVar{ + { + Name: "HOME", + Value: "/home/build", + }, + { + Name: "TAG", + Value: i.FinalImage.Pullspec, + }, + { + Name: "BASE_IMAGE_PULL_CREDS", + Value: "/tmp/base-image-pull-creds/config.json", + }, + { + Name: "FINAL_IMAGE_PUSH_CREDS", + Value: "/tmp/final-image-push-creds/config.json", + }, + } + + var uid int64 = 1000 + var gid int64 = 1000 + + securityContext := &corev1.SecurityContext{ + RunAsUser: &uid, + RunAsGroup: &gid, + } + + command := []string{"/bin/bash", "-c"} + + volumeMounts := []corev1.VolumeMount{ + { + Name: "machineconfig", + MountPath: "/tmp/machineconfig", + }, + { + Name: "dockerfile", + MountPath: "/tmp/dockerfile", + }, + { + Name: "base-image-pull-creds", + MountPath: "/tmp/base-image-pull-creds", + }, + { + Name: "final-image-push-creds", + MountPath: "/tmp/final-image-push-creds", + }, + { + Name: "done", + MountPath: "/tmp/done", + }, + } + + // TODO: We need pull creds with permissions to pull the base image. By + // default, none of the MCO pull secrets can directly pull it. I injected my + // own pull creds to do that. + return &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Pod", + }, + ObjectMeta: i.getObjectMeta(i.getBuildName()), + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{ + { + // This container performs the image build / push process. + Name: "image-build", + // TODO: Figure out how to not hard-code this here. + Image: buildahImagePullspec, + Env: env, + Command: append(command, buildScript), + ImagePullPolicy: corev1.PullAlways, + SecurityContext: securityContext, + VolumeMounts: volumeMounts, + }, + { + // This container waits for the aforementioned container to finish + // building so we can get the final image SHA. + Name: "wait-for-done", + Env: env, + Command: append(command, waitScript), + Image: skopeoImagePullspec, + ImagePullPolicy: corev1.PullAlways, + SecurityContext: securityContext, + VolumeMounts: volumeMounts, + }, + }, + Volumes: []corev1.Volume{ + { + // Provides the rendered Dockerfile. + Name: "dockerfile", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: i.getDockerfileConfigMapName(), + }, + }, + }, + }, + { + // Provides the rendered MachineConfig in a gzipped / base64-encoded + // format. + Name: "machineconfig", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: i.getMCConfigMapName(), + }, + }, + }, + }, + { + // Provides the credentials needed to pull the base OS image. + Name: "base-image-pull-creds", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: i.BaseImage.PullSecret.Name, + Items: []corev1.KeyToPath{ + { + Key: corev1.DockerConfigJsonKey, + Path: "config.json", + }, + }, + }, + }, + }, + { + // Provides the credentials needed to push the final OS image. + Name: "final-image-push-creds", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: i.FinalImage.PullSecret.Name, + Items: []corev1.KeyToPath{ + { + Key: corev1.DockerConfigJsonKey, + Path: "config.json", + }, + }, + }, + }, + }, + { + // Provides a way for the "image-build" container to signal that it + // finished so that the "wait-for-done" container can retrieve the + // iamge SHA. 
+ Name: "done", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + }, + }, + }, + }, + }, + } +} + +// Constructs a common metav1.ObjectMeta object with the namespace, labels, and annotations set. +func (i ImageBuildRequest) getObjectMeta(name string) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Name: name, + Namespace: ctrlcommon.MCONamespace, + Labels: map[string]string{ + ctrlcommon.OSImageBuildPodLabel: "", + targetMachineConfigPoolLabel: i.Pool.Name, + desiredConfigLabel: i.Pool.Spec.Configuration.Name, + }, + Annotations: map[string]string{ + mcPoolAnnotation: "", + }, + } +} + +// Computes the Dockerfile ConfigMap name based upon the MachineConfigPool name. +func (i ImageBuildRequest) getDockerfileConfigMapName() string { + return fmt.Sprintf("dockerfile-%s", i.Pool.Spec.Configuration.Name) +} + +// Computes the MachineConfig ConfigMap name based upon the MachineConfigPool name. +func (i ImageBuildRequest) getMCConfigMapName() string { + return fmt.Sprintf("mc-%s", i.Pool.Spec.Configuration.Name) +} + +// Computes the build name based upon the MachineConfigPool name. +func (i ImageBuildRequest) getBuildName() string { + return fmt.Sprintf("build-%s", i.Pool.Spec.Configuration.Name) +} diff --git a/pkg/controller/build/image_build_request_test.go b/pkg/controller/build/image_build_request_test.go new file mode 100644 index 0000000000..fa8b4bcf85 --- /dev/null +++ b/pkg/controller/build/image_build_request_test.go @@ -0,0 +1,80 @@ +package build + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +// Tests that Image Build Requests is constructed as expected and does a +// (mostly) smoke test of its methods. +func TestImageBuildRequest(t *testing.T) { + t.Parallel() + + mcp := newMachineConfigPool("worker", "rendered-worker-1") + + osImageURLConfigMap := getOSImageURLConfigMap() + onClusterBuildConfigMap := getOnClusterBuildConfigMap() + + ibr := newImageBuildRequestWithConfigMap(mcp, osImageURLConfigMap, onClusterBuildConfigMap) + + dockerfile, err := ibr.renderDockerfile() + assert.NoError(t, err) + + expectedDockerfileContents := []string{ + osImageURLConfigMap.Data[releaseVersionConfigKey], + osImageURLConfigMap.Data[baseOSContainerImageConfigKey], + osImageURLConfigMap.Data[baseOSExtensionsContainerImageConfigKey], + mcp.Name, + mcp.Spec.Configuration.Name, + machineConfigJSONFilename, + } + + for _, content := range expectedDockerfileContents { + assert.Contains(t, dockerfile, content) + } + + assert.NotNil(t, ibr.toBuild()) + assert.NotNil(t, ibr.toBuildPod()) + + dockerfileConfigmap, err := ibr.dockerfileToConfigMap() + assert.NoError(t, err) + assert.NotNil(t, dockerfileConfigmap) + assert.Equal(t, dockerfileConfigmap.Data["Dockerfile"], dockerfile) + + assert.Equal(t, osImageURLConfigMap.Data[baseOSContainerImageConfigKey], ibr.BaseImage.Pullspec) + assert.Equal(t, osImageURLConfigMap.Data[baseOSExtensionsContainerImageConfigKey], ibr.ExtensionsImage.Pullspec) + + assert.Equal(t, onClusterBuildConfigMap.Data[baseImagePullSecretNameConfigKey], ibr.BaseImage.PullSecret.Name) + assert.Equal(t, onClusterBuildConfigMap.Data[baseImagePullSecretNameConfigKey], ibr.ExtensionsImage.PullSecret.Name) + + assert.Equal(t, onClusterBuildConfigMap.Data[finalImagePullspecConfigKey], ibr.FinalImage.Pullspec) + + assert.Equal(t, onClusterBuildConfigMap.Data[finalImagePushSecretNameConfigKey], ibr.FinalImage.PullSecret.Name) + + assert.Equal(t, "dockerfile-rendered-worker-1", 
ibr.getDockerfileConfigMapName()) + assert.Equal(t, "build-rendered-worker-1", ibr.getBuildName()) + assert.Equal(t, "mc-rendered-worker-1", ibr.getMCConfigMapName()) +} + +// Tests that the Dockerfile is correctly rendered in the absence of the +// extensions image. For now, we just check whether the extensions image is +// imported. Once we wire up the extensions container, we'll need to modify +// this to ensure that the remainder of the Dockerfile gets rendered correctly. +func TestImageBuildRequestMissingExtensionsImage(t *testing.T) { + t.Parallel() + + mcp := newMachineConfigPool("worker", "rendered-worker-1") + + osImageURLConfigMap := getOSImageURLConfigMap() + onClusterBuildConfigMap := getOnClusterBuildConfigMap() + + delete(osImageURLConfigMap.Data, baseOSExtensionsContainerImageConfigKey) + + ibr := newImageBuildRequestWithConfigMap(mcp, osImageURLConfigMap, onClusterBuildConfigMap) + + dockerfile, err := ibr.renderDockerfile() + assert.NoError(t, err) + + assert.NotContains(t, dockerfile, "AS extensions") +} diff --git a/pkg/controller/build/pod_build_controller.go b/pkg/controller/build/pod_build_controller.go new file mode 100644 index 0000000000..26fb853072 --- /dev/null +++ b/pkg/controller/build/pod_build_controller.go @@ -0,0 +1,359 @@ +package build + +import ( + "context" + "fmt" + "strings" + "time" + + mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/scheme" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + coreclientsetv1 "k8s.io/client-go/kubernetes/typed/core/v1" + corelistersv1 "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" +) + +// Controller defines the build controller. +type PodBuildController struct { + *Clients + *informers + + eventRecorder record.EventRecorder + + // The function to call whenever we've encountered a build pod. This function is + // responsible for examining the build pod to determine what state its in and map + // that state to the appropriate MachineConfigPool object. + podHandler func(*corev1.Pod) error + + syncHandler func(pod string) error + enqueuePod func(*corev1.Pod) + + podLister corelistersv1.PodLister + + podListerSynced cache.InformerSynced + + queue workqueue.RateLimitingInterface + + config BuildControllerConfig +} + +var _ ImageBuilder = (*PodBuildController)(nil) + +// Returns a new pod build controller. 
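As the tests above assert, every object the controller creates takes its name from the pool's rendered config, so for a hypothetical pool whose rendered config is rendered-infra-2 the derived names would be:

    ibr := newImageBuildRequest(pool)  // pool.Spec.Configuration.Name == "rendered-infra-2"
    ibr.getDockerfileConfigMapName()   // "dockerfile-rendered-infra-2"
    ibr.getMCConfigMapName()           // "mc-rendered-infra-2"
    ibr.getBuildName()                 // "build-rendered-infra-2"

The PodBuildController constructor follows.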
+func newPodBuildController( + ctrlConfig BuildControllerConfig, + clients *Clients, + podHandler func(*corev1.Pod) error, +) *PodBuildController { + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(klog.Infof) + eventBroadcaster.StartRecordingToSink(&coreclientsetv1.EventSinkImpl{Interface: clients.kubeclient.CoreV1().Events("")}) + + ctrl := &PodBuildController{ + Clients: clients, + informers: newInformers(clients), + eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "machineosbuilder-podbuildcontroller"}), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "machineosbuilder-podbuildcontroller"), + config: ctrlConfig, + podHandler: podHandler, + } + + // As an aside, why doesn't the constructor here set up all the informers? + ctrl.podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: ctrl.addPod, + UpdateFunc: ctrl.updatePod, + DeleteFunc: ctrl.deletePod, + }) + + ctrl.podLister = ctrl.podInformer.Lister() + + ctrl.podListerSynced = ctrl.podInformer.Informer().HasSynced + + ctrl.syncHandler = ctrl.syncPod + ctrl.enqueuePod = ctrl.enqueueDefault + + return ctrl +} + +func (ctrl *PodBuildController) enqueue(pod *corev1.Pod) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pod) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", pod, err)) + return + } + + ctrl.queue.Add(key) +} + +func (ctrl *PodBuildController) enqueueRateLimited(pod *corev1.Pod) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pod) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", pod, err)) + return + } + + ctrl.queue.AddRateLimited(key) +} + +// enqueueAfter will enqueue a pod after the provided amount of time. +func (ctrl *PodBuildController) enqueueAfter(pod *corev1.Pod, after time.Duration) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pod) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", pod, err)) + return + } + + ctrl.queue.AddAfter(key, after) +} + +// enqueueDefault calls a default enqueue function +func (ctrl *PodBuildController) enqueueDefault(pod *corev1.Pod) { + ctrl.enqueueAfter(pod, ctrl.config.UpdateDelay) +} + +// Syncs pods. +func (ctrl *PodBuildController) syncPod(key string) error { //nolint:dupl // This does have commonality with the ImageBuildController. + start := time.Now() + defer func() { + klog.Infof("Finished syncing pod %s: %s", key, time.Since(start)) + }() + klog.Infof("Started syncing pod %s", key) + + _, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return err + } + // TODO: Why do I need to set the namespace here? + pod, err := ctrl.podLister.Pods(ctrlcommon.MCONamespace).Get(name) + if k8serrors.IsNotFound(err) { + klog.V(2).Infof("Pod %v has been deleted", key) + return nil + } + if err != nil { + return err + } + + pod, err = ctrl.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + // If we don't have all of the OS build labels attached to this pod, we + // ignore it. There is probably something we can do along the lines looking + // at ownership though. 
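	// One possible direction for that ownership idea (sketch only): set a
	// controller owner reference when the build pod is created, then filter
	// on it here, e.g.:
	//
	//	if owner := metav1.GetControllerOf(pod); owner == nil || owner.Kind != "MachineConfigPool" {
	//		return nil
	//	}
	//
	// For now, the label check below is what screens out unrelated pods.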
+ if !hasAllRequiredOSBuildLabels(pod.Labels) { + klog.Infof("Ignoring non-build pod %s", pod.Name) + return nil + } + + if err := ctrl.podHandler(pod); err != nil { + return fmt.Errorf("unable to update with build pod status: %w", err) + } + + klog.Infof("Updated MachineConfigPool with build pod status. Build pod %s in %s", pod.Name, pod.Status.Phase) + + return nil +} + +// Starts the Pod Build Controller. +func (ctrl *PodBuildController) Run(ctx context.Context, workers int) { + defer utilruntime.HandleCrash() + defer ctrl.queue.ShutDown() + + ctrl.informers.start(ctx) + + if !cache.WaitForCacheSync(ctx.Done(), ctrl.podListerSynced) { + return + } + + klog.Info("Starting MachineOSBuilder-PodBuildController") + defer klog.Info("Shutting down MachineOSBuilder-PodBuildController") + + for i := 0; i < workers; i++ { + go wait.Until(ctrl.worker, time.Second, ctx.Done()) + } + + <-ctx.Done() +} + +// Gets the final image pullspec by examining the logs of the wait-for-done +// container. This container runs `$ skopeo inspect +// docker://registry.hostname.com/org/repo:latest`, which dumps a JSON payload +// to stdout. This function snarfs that output into a JSON struct and does some +// munging to get the fully qualified image pullspec (i.e., +// registry.hostname.com/org/repo@sha256:...). +func (ctrl *PodBuildController) FinalPullspec(pool *mcfgv1.MachineConfigPool) (string, error) { + ibr := newImageBuildRequest(pool) + + buildName := ibr.getBuildName() + + pod, err := ctrl.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Get(context.TODO(), buildName, metav1.GetOptions{}) + if err != nil { + return "", fmt.Errorf("could not get build pod %s for pool %s: %w", buildName, pool.Name, err) + } + + req := ctrl.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).GetLogs(pod.Name, &corev1.PodLogOptions{ + Container: "wait-for-done", + }) + + res := req.Do(context.TODO()) + if err := res.Error(); err != nil { + return "", err + } + + out, err := res.Raw() + if err != nil { + return "", err + } + + // Unfortunately, FakeClient has a "fake logs" value hardcoded within it. + // With that in mind, we need to short-circuit this. See: + // https://github.com/kubernetes/kubernetes/pull/91485. + if string(out) == "fake logs" { + return "fake@logs", nil + } + + return parseSkopeoOutputIntoImagePullspec(out) +} + +// Deletes the underlying build pod. +func (ctrl *PodBuildController) DeleteBuildObject(pool *mcfgv1.MachineConfigPool) error { + ibr := newImageBuildRequest(pool) + return ctrl.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Delete(context.TODO(), ibr.getBuildName(), metav1.DeleteOptions{}) +} + +// Determines if a build is currently running by looking for a corresponding pod. +func (ctrl *PodBuildController) IsBuildRunning(pool *mcfgv1.MachineConfigPool) (bool, error) { + ibr := newImageBuildRequest(pool) + + // First check if we have a build in progress for this MachineConfigPool and rendered config. + _, err := ctrl.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Get(context.TODO(), ibr.getBuildName(), metav1.GetOptions{}) + if err != nil && !k8serrors.IsNotFound(err) { + return false, err + } + + return err == nil, nil +} + +// Starts a new build pod, assuming one is not found first. In that case, it returns an object reference to the preexisting build pod. 
+func (ctrl *PodBuildController) StartBuild(ibr ImageBuildRequest) (*corev1.ObjectReference, error) { + targetMC := ibr.Pool.Spec.Configuration.Name + + // TODO: Find a constant for this: + if !strings.HasPrefix(targetMC, "rendered-") { + return nil, fmt.Errorf("%s is not a rendered MachineConfig", targetMC) + } + + // First check if we have a build in progress for this MachineConfigPool and rendered config. + pod, err := ctrl.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Get(context.TODO(), ibr.getBuildName(), metav1.GetOptions{}) + if err != nil && !k8serrors.IsNotFound(err) { + return nil, err + } + + // This means we found a preexisting build pod. + if pod != nil && err == nil && hasAllRequiredOSBuildLabels(pod.Labels) { + klog.Infof("Found preexisting build pod (%s) for pool %s", pod.Name, ibr.Pool.Name) + return toObjectRef(pod), nil + } + + klog.Infof("Starting build for pool %s", ibr.Pool.Name) + klog.Infof("Build pod name: %s", ibr.getBuildName()) + klog.Infof("Final image will be pushed to %q, using secret %q", ibr.FinalImage.Pullspec, ibr.FinalImage.PullSecret.Name) + + pod, err = ctrl.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Create(context.TODO(), ibr.toBuildPod(), metav1.CreateOptions{}) + if err != nil { + return nil, fmt.Errorf("could not create build pod: %w", err) + } + + klog.Infof("Build started for pool %s in %s!", ibr.Pool.Name, pod.Name) + + return toObjectRef(pod), nil +} + +// Fires whenever a new pod is started. +func (ctrl *PodBuildController) addPod(obj interface{}) { + pod := obj.(*corev1.Pod).DeepCopy() + isBuildPod := hasAllRequiredOSBuildLabels(pod.Labels) + klog.V(4).Infof("Adding Pod %s. Is build pod? %v", pod.Name, isBuildPod) + if isBuildPod { + ctrl.enqueuePod(pod) + } +} + +// Fires whenever a pod is updated. +func (ctrl *PodBuildController) updatePod(oldObj, curObj interface{}) { + oldPod := oldObj.(*corev1.Pod).DeepCopy() + curPod := curObj.(*corev1.Pod).DeepCopy() + + isBuildPod := hasAllRequiredOSBuildLabels(curPod.Labels) + + klog.Infof("Updating pod %s. Is build pod? %v", curPod.Name, isBuildPod) + + // Ignore non-build pods. + // TODO: Figure out if we can add the filter criteria onto the lister. + if !isBuildPod { + return + } + + if oldPod.Status.Phase != curPod.Status.Phase { + klog.Infof("Pod %s changed from %s to %s", oldPod.Name, oldPod.Status.Phase, curPod.Status.Phase) + } + + klog.Infof("Pod %s updated", curPod.Name) + + ctrl.enqueuePod(curPod) +} + +func (ctrl *PodBuildController) handleErr(err error, key interface{}) { + if err == nil { + ctrl.queue.Forget(key) + return + } + + if ctrl.queue.NumRequeues(key) < ctrl.config.MaxRetries { + klog.V(2).Infof("Error syncing pod %v: %v", key, err) + ctrl.queue.AddRateLimited(key) + return + } + + utilruntime.HandleError(err) + klog.V(2).Infof("Dropping pod %q out of the queue: %v", key, err) + ctrl.queue.Forget(key) + ctrl.queue.AddAfter(key, 1*time.Minute) +} + +// Fires whenever a pod is deleted. +func (ctrl *PodBuildController) deletePod(obj interface{}) { + pod := obj.(*corev1.Pod).DeepCopy() + klog.V(4).Infof("Deleting Pod %s. Is build pod? %v", pod.Name, hasAllRequiredOSBuildLabels(pod.Labels)) + ctrl.enqueuePod(pod) +} + +// worker runs a worker thread that just dequeues items, processes them, and marks them done. +// It enforces that the syncHandler is never invoked concurrently with the same key. 
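On the "Find a constant for this" TODO in StartBuild above, a minimal sketch would be a shared prefix constant plus a small helper (names hypothetical):

    const renderedConfigPrefix = "rendered-"

    func isRenderedConfigName(name string) bool {
        return strings.HasPrefix(name, renderedConfigPrefix)
    }

The worker loop described in the preceding comment follows.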
+func (ctrl *PodBuildController) worker() { + for ctrl.processNextWorkItem() { + } +} + +func (ctrl *PodBuildController) processNextWorkItem() bool { + key, quit := ctrl.queue.Get() + if quit { + return false + } + defer ctrl.queue.Done(key) + + err := ctrl.syncHandler(key.(string)) + ctrl.handleErr(err, key) + + return true +} diff --git a/pkg/controller/build/pool_build_resources.go b/pkg/controller/build/pool_build_resources.go deleted file mode 100644 index 881d21b696..0000000000 --- a/pkg/controller/build/pool_build_resources.go +++ /dev/null @@ -1,100 +0,0 @@ -package build - -// This was arose from the need to keep track of resources the pool should ensure the presence of. -// It is not as elegant as it should be, but the problem I am trying to solve is capturing: -// 1.) what resources the pool should ensure the presence of and -// 2.) the relationships between them - -import ( - "bytes" - "text/template" - - mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" - ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" -) - -type PoolResourceNames struct { - ImageStream PoolImageStreamList - BuildConfig PoolBuildConfigList -} - -type PoolImageStreamList struct { - // Base image for all of our builds - Base string - // Base image supplied externally, takes precedence over default CoreOS stream - ExternalBase string - // Where the render controller renders its tiny config image - RenderedConfig string - // Where the MCO outputs its multi-stage build with machineconfig to - Content string - // Where the image goes if a user uses the custom buildconfig - CustomContent string - // Hypothetically if you built a working image outside the cluster, you would tag it here - External string - // Where we look for a per-node config image - PerNode string -} - -type PoolBuildConfigList struct { - Content PoolBuildConfig - CustomContent PoolBuildConfig -} - -type PoolBuildConfig struct { - Name string - Source string - Target string - TriggeredByStreams []string - DockerfileContent string -} - -func PoolBuildResources(pool *mcfgv1.MachineConfigPool) *PoolResourceNames { - - pisl := PoolImageStreamList{ - Base: pool.Name + ctrlcommon.ImageStreamSuffixCoreOS, - ExternalBase: pool.Name + ctrlcommon.ImageStreamSuffixExternalBase, - RenderedConfig: pool.Name + ctrlcommon.ImageStreamSuffixRenderedConfig, - Content: pool.Name + ctrlcommon.ImageStreamSuffixMCOContent, - CustomContent: pool.Name + ctrlcommon.ImageStreamSuffixMCOContentCustom, - External: pool.Name + ctrlcommon.ImageStreamSuffixMCOContentExternal, - PerNode: pool.Name + ctrlcommon.ImageStreamSuffixMCOContentPerNode, - } - - // Templated dockerfile for the complicated mco-content buildconfig that applies the rendered-config - t, _ := template.New(machineConfigContentDockerfile).Parse(machineConfigContentDockerfile) - var tpl bytes.Buffer - t.Execute(&tpl, pisl) - - bcl := PoolBuildConfigList{ - Content: PoolBuildConfig{ - Name: pool.Name + "-build" + ctrlcommon.ImageStreamSuffixMCOContent, - Source: pisl.Base, - Target: pisl.Content, - TriggeredByStreams: []string{pisl.RenderedConfig + ":latest"}, - DockerfileContent: tpl.String(), - }, - CustomContent: PoolBuildConfig{ - Name: pool.Name + "-build" + ctrlcommon.ImageStreamSuffixMCOContentCustom, - Source: pisl.Content, - Target: pisl.CustomContent, - TriggeredByStreams: []string{}, - DockerfileContent: dummyDockerfile, - }, - } - - return &PoolResourceNames{pisl, bcl} -} - -// IsManagedImageStream tells us if a given 
image stream name is one of the names we think we should be managing. This is used to tell if -// someone has assigned some completely unmanaged imagestream to our layered pool. -func (prn *PoolResourceNames) IsManagedImageStream(imageStreamName string) bool { - // TODO(jkyros): The longer this goes on, the more I feel this should be a map - if imageStreamName == prn.ImageStream.Base || - imageStreamName == prn.ImageStream.Content || - imageStreamName == prn.ImageStream.CustomContent || - imageStreamName == prn.ImageStream.External || - imageStreamName == prn.ImageStream.PerNode { - return true - } - return false -} diff --git a/pkg/controller/common/constants.go b/pkg/controller/common/constants.go index 864871fa84..571fc825f2 100644 --- a/pkg/controller/common/constants.go +++ b/pkg/controller/common/constants.go @@ -42,37 +42,12 @@ const ( // MachineConfigPoolWorker is the MachineConfigPool name given to the worker MachineConfigPoolWorker = "worker" - // ExperimentalLayeringPoolLabel is the label that enables the "layered" workflow path for a pool - ExperimentalLayeringPoolLabel = "machineconfiguration.openshift.io/layered" + // LayeringEnabledPoolLabel is the label that enables the "layered" workflow path for a pool. + LayeringEnabledPoolLabel = "machineconfiguration.openshift.io/layering-enabled" - // ExperimentalLayeringPoolImageStreamLabel is the label that enables tells the pool which imagestream to grab images out of - ExperimentalLayeringPoolImageStreamLabel = "machineconfiguration.openshift.io/selected-image-stream" - - // ExperimentalNewestLayeredImageAnnotationKey is the annotation that signifies the newest image that has been pushed to a machine - // config pool's imagestream - ExperimentalNewestLayeredImageAnnotationKey = "machineconfiguration.openshift.io/newestImage" // ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey is the annotation that signifies which rendered config - // the latest image in the pool's imagestream is equivalent to + // TODO(zzlotnik): Determine if we should use this still. 
ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey = "machineconfiguration.openshift.io/newestImageEquivalentConfig" - // CoreOSImageStreamName is the name of the global (non-pool-specific) default base image stream name that pools will draw from if they - // do not have any other base image specified - CoreOSImageStreamName = "coreos" - - // ImageStreamSuffixCoreOS is the suffix for the imagestream holding the base coreos image - ImageStreamSuffixCoreOS = "-base" - // ImageStreamSuffixExternalBase is the suffix for a pool imagestream where the base image comes from outside the cluster - ImageStreamSuffixExternalBase = "-external-base" - - // ImageStreamSuffixRenderedConfig is the suffix for a pool imagestream where the mco also writes its rendered machineconfig - ImageStreamSuffixRenderedConfig = "-rendered-config" - // ImageStreamSuffixMCOContent is the suffix for a pool imagestream where the mco has applied its rendered-config to the base image - ImageStreamSuffixMCOContent = "-mco-content" - // ImageStreamSuffixMcoContentCustom is the suffix for a pool imagestream where content has been customized by a user - ImageStreamSuffixMCOContentCustom = "-mco-content-custom" - // ImageStreamSuffixMCOContentExternal is the suffix for a pool imagestream where content comes from outside the cluster - ImageStreamSuffixMCOContentExternal = "-mco-content-external" - - // ImageStreamSuffixMCOContentPerNode is the suffix for a pool imagestream containing any per-node images - ImageStreamSuffixMCOContentPerNode = "-mco-content-per-node" + OSImageBuildPodLabel = "machineconfiguration.openshift.io/buildPod" ) diff --git a/pkg/controller/common/helpers.go b/pkg/controller/common/helpers.go index f5b9c6eca6..1ad0e2eb82 100644 --- a/pkg/controller/common/helpers.go +++ b/pkg/controller/common/helpers.go @@ -1171,19 +1171,8 @@ func (n namespacedEventRecorder) AnnotatedEventf(object runtime.Object, annotati n.delegate.AnnotatedEventf(ensureEventNamespace(object), annotations, eventtype, reason, messageFmt, args...) 
} -func GetPoolImageStream(pool *mcfgv1.MachineConfigPool) (string, error) { - if imagestream, ok := pool.Labels[ExperimentalLayeringPoolImageStreamLabel]; ok { - return imagestream, nil - } - return "", fmt.Errorf("No ImageStream found for pool %s", pool.Name) -} - -func SetPoolImageStream(pool *mcfgv1.MachineConfigPool, imageStreamName string) { - pool.Labels[ExperimentalLayeringPoolImageStreamLabel] = imageStreamName -} - func IsLayeredPool(pool *mcfgv1.MachineConfigPool) bool { - if _, ok := pool.Labels[ExperimentalLayeringPoolLabel]; ok { + if _, ok := pool.Labels[LayeringEnabledPoolLabel]; ok { return true } return false diff --git a/pkg/daemon/constants/constants.go b/pkg/daemon/constants/constants.go index aaf272fd71..ce46ad69cf 100644 --- a/pkg/daemon/constants/constants.go +++ b/pkg/daemon/constants/constants.go @@ -8,6 +8,11 @@ const ( // // XXX + // CurrentImageAnnotationKey is used to get the current OS image pullspec for a machine + CurrentImageAnnotationKey = "machineconfiguration.openshift.io/currentImage" + // DesiredImageAnnotationKey is used to specify the desired OS image pullspec for a machine + DesiredImageAnnotationKey = "machineconfiguration.openshift.io/desiredImage" + // CurrentMachineConfigAnnotationKey is used to fetch current MachineConfig for a machine CurrentMachineConfigAnnotationKey = "machineconfiguration.openshift.io/currentConfig" // DesiredMachineConfigAnnotationKey is used to specify the desired MachineConfig for a machine From bfb4468662cf0ff7a5f3bdab5af80183eb9cda5f Mon Sep 17 00:00:00 2001 From: Zack Zlotnik Date: Wed, 28 Jun 2023 18:10:42 -0400 Subject: [PATCH 8/9] remove skopeo dependency for custom build pod --- pkg/controller/build/assets/build.sh | 4 +- pkg/controller/build/assets/wait.sh | 13 ++--- pkg/controller/build/build_controller.go | 14 ++--- pkg/controller/build/build_controller_test.go | 6 --- pkg/controller/build/fixtures_test.go | 18 ++++++- pkg/controller/build/helpers.go | 31 ----------- pkg/controller/build/helpers_test.go | 14 ----- pkg/controller/build/image_build_request.go | 22 ++++++-- pkg/controller/build/pod_build_controller.go | 54 ++++++++----------- 9 files changed, 70 insertions(+), 106 deletions(-) diff --git a/pkg/controller/build/assets/build.sh b/pkg/controller/build/assets/build.sh index b93cf32554..e57bdcdad9 100644 --- a/pkg/controller/build/assets/build.sh +++ b/pkg/controller/build/assets/build.sh @@ -25,7 +25,5 @@ buildah bud \ buildah push \ --storage-driver vfs \ --authfile="$FINAL_IMAGE_PUSH_CREDS" \ + --digestfile="/tmp/done/digestfile" \ --cert-dir /var/run/secrets/kubernetes.io/serviceaccount "$TAG" - -# Signal that we're done. -echo "done" > /tmp/done/done diff --git a/pkg/controller/build/assets/wait.sh b/pkg/controller/build/assets/wait.sh index fbc059ba13..bf1675fe63 100644 --- a/pkg/controller/build/assets/wait.sh +++ b/pkg/controller/build/assets/wait.sh @@ -5,15 +5,12 @@ # custom build pod. # Wait until the done file appears. -while [ ! -f "/tmp/done/done" ] +while [ ! -f "/tmp/done/digestfile" ] do sleep 1 done -# Inspect the image to get the digest from the registry. This produces JSON -# output which we then scrape the pod logs for. This is why we're not using set -# -x for this script. 
-skopeo inspect \ - --authfile "$FINAL_IMAGE_PUSH_CREDS" \ - --cert-dir /var/run/secrets/kubernetes.io/serviceaccount \ - "docker://$TAG" +oc create configmap \ + "$DIGEST_CONFIGMAP_NAME" \ + --namespace openshift-machine-config-operator \ + --from-file=digest=/tmp/done/digestfile diff --git a/pkg/controller/build/build_controller.go b/pkg/controller/build/build_controller.go index 77a0814103..4e587627c7 100644 --- a/pkg/controller/build/build_controller.go +++ b/pkg/controller/build/build_controller.go @@ -888,8 +888,8 @@ func (ctrl *Controller) getOnClusterBuildConfig(pool *mcfgv1.MachineConfigPool) } // We don't want to write this back to the API server since it's only useful - // this specific build. TODO: Migrate this to the ImageBuildRequest object so - // that it's generated on-demand instead. + // for this specific build. TODO: Migrate this to the ImageBuildRequest + // object so that it's generated on-demand instead. onClusterBuildConfigMap.Data[finalImagePullspecConfigKey] = finalImagePullspecWithTag return onClusterBuildConfigMap, err @@ -910,11 +910,11 @@ func (ctrl *Controller) validatePullSecret(name string) (*corev1.Secret, error) } // If a Docker pull secret lacks the top-level "auths" key, this means that - // it is a legacy-style pull secret. Buildah and Skopeo do not know how to - // correctly use one of these secrets. With that in mind, we "canonicalize" - // it, meaning we inject the existing legacy secret into a {"auths": {}} - // schema that Buildah and Skopeo can understand. We create a new K8s secret - // with this info and pass that secret into our image builder instead. + // it is a legacy-style pull secret. Buildah does not know how to correctly + // use one of these secrets. With that in mind, we "canonicalize" it, meaning + // we inject the existing legacy secret into a {"auths": {}} schema that + // Buildah can understand. We create a new K8s secret with this info and pass + // that secret into our image builder instead. if strings.HasSuffix(secret.Name, canonicalSecretSuffix) { klog.Infof("Found legacy-style secret %s, canonicalizing as %s", oldSecretName, secret.Name) return ctrl.handleCanonicalizedPullSecret(secret) diff --git a/pkg/controller/build/build_controller_test.go b/pkg/controller/build/build_controller_test.go index 58e1d2773b..d5bd54d981 100644 --- a/pkg/controller/build/build_controller_test.go +++ b/pkg/controller/build/build_controller_test.go @@ -361,12 +361,6 @@ func isMCPBuildSuccess(mcp *mcfgv1.MachineConfigPool) bool { return hasConfigAnnotation && ctrlcommon.IsLayeredPool(mcp) && - // Unfortunately, FakeClient has a "fake logs" value hardcoded within it. - // With that in mind, we cannot inject realistic Skopeo logs, so we instead - // inject "fake@logs" and skip the JSON parsing portion of the test. See: - // https://github.com/kubernetes/kubernetes/pull/91485. Presumably, once - // https://github.com/kubernetes/kubernetes/issues/117144 is addressed and - // lands, we can stop doing this. 
(imagePullspec == expectedImagePullspecWithSHA || imagePullspec == "fake@logs") && mcfgv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcfgv1.MachineConfigPoolBuildSuccess) && !machineConfigPoolHasBuildRef(mcp) && machineConfigPoolHasMachineConfigRefs(mcp) diff --git a/pkg/controller/build/fixtures_test.go b/pkg/controller/build/fixtures_test.go index 852977694f..eec68a7747 100644 --- a/pkg/controller/build/fixtures_test.go +++ b/pkg/controller/build/fixtures_test.go @@ -475,7 +475,22 @@ func assertMCPFollowsBuildPodStatus(ctx context.Context, t *testing.T, cs *Clien // Set the pod phase and update it. buildPod.Status.Phase = phase - // TODO: Figure out how to set / get the image pullspec from the build pod. + // If we've reached the successful pod phase, create the ConfigMap that the + // build pod does which has the resulting image digest. + if phase == corev1.PodSucceeded { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: ibr.getDigestConfigMapName(), + Namespace: ctrlcommon.MCONamespace, + }, + Data: map[string]string{ + "digest": expectedImageSHA, + }, + } + _, cmErr := cs.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Create(ctx, cm, metav1.CreateOptions{}) + require.NoError(t, cmErr) + } + _, err = cs.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Update(ctx, buildPod, metav1.UpdateOptions{}) require.NoError(t, err) @@ -508,6 +523,7 @@ func assertMCPFollowsBuildPodStatus(ctx context.Context, t *testing.T, cs *Clien _, err := cs.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Get(ctx, buildPodName, metav1.GetOptions{}) switch endingPhase { case corev1.PodSucceeded: + // If the build pod was successful, looking it up should fail because it should have been deleted. outcome = assert.Error(t, err) case corev1.PodFailed: diff --git a/pkg/controller/build/helpers.go b/pkg/controller/build/helpers.go index b51b314e5f..9e12ccb764 100644 --- a/pkg/controller/build/helpers.go +++ b/pkg/controller/build/helpers.go @@ -7,10 +7,8 @@ import ( "encoding/json" "fmt" "io" - "time" "github.com/containers/image/v5/docker/reference" - imagetypes "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -70,35 +68,6 @@ func compress(r io.Reader, w io.Writer) error { return nil } -// Parses the output of `$ skopeo inspect -// docker://registry.hostname/org/repo:latest` into a struct to get the image -// pullspec and digest. -func parseSkopeoOutputIntoImagePullspec(skopeoBytes []byte) (string, error) { - // Copy / pasta'ed from: https://github.com/containers/skopeo/blob/main/cmd/skopeo/inspect/output.go - type skopeoOutput struct { - Name string `json:",omitempty"` - Tag string `json:",omitempty"` - Digest digest.Digest - RepoTags []string - Created *time.Time - DockerVersion string - Labels map[string]string - Architecture string - Os string - Layers []string - LayersData []imagetypes.ImageInspectLayer - Env []string - } - - out := &skopeoOutput{} - - if err := json.Unmarshal(skopeoBytes, out); err != nil { - return "", err - } - - return parseImagePullspecWithDigest(out.Name, out.Digest) -} - // Replaces any tags on the image pullspec with the provided image digest. 
func parseImagePullspecWithDigest(pullspec string, imageDigest digest.Digest) (string, error) { named, err := reference.ParseNamed(pullspec) diff --git a/pkg/controller/build/helpers_test.go b/pkg/controller/build/helpers_test.go index 18cdfe11b4..0f06a0a41b 100644 --- a/pkg/controller/build/helpers_test.go +++ b/pkg/controller/build/helpers_test.go @@ -17,20 +17,6 @@ func TestParseImagePullspec(t *testing.T) { assert.Equal(t, expectedImagePullspecWithSHA, out) } -// Tests that Skopeo output is correctly parsed. For brevity, I did not include the full output. -func TestParseSkopeoOutput(t *testing.T) { - t.Parallel() - - skopeoOutput := `{ - "Name": "quay.io/zzlotnik/testing", - "Digest": "sha256:c2a723564f370e80df76f8355c410934fa0b274f406e5cbdc22075f796b63f4e" - }` - - out, err := parseSkopeoOutputIntoImagePullspec([]byte(skopeoOutput)) - assert.NoError(t, err) - assert.Equal(t, "quay.io/zzlotnik/testing@sha256:c2a723564f370e80df76f8355c410934fa0b274f406e5cbdc22075f796b63f4e", out) -} - // Tests that pull secrets are canonicalized. In other words, converted from // the legacy-style pull secret to the new-style secret. func TestCanonicalizePullSecret(t *testing.T) { diff --git a/pkg/controller/build/image_build_request.go b/pkg/controller/build/image_build_request.go index 3c5b568b24..90fcce9167 100644 --- a/pkg/controller/build/image_build_request.go +++ b/pkg/controller/build/image_build_request.go @@ -18,7 +18,6 @@ const ( mcPoolAnnotation string = "machineconfiguration.openshift.io/pool" machineConfigJSONFilename string = "machineconfig.json.gz" buildahImagePullspec string = "quay.io/buildah/stable:latest" - skopeoImagePullspec string = "quay.io/skopeo/stable:latest" ) //go:embed assets/Dockerfile.on-cluster-build-template @@ -235,6 +234,10 @@ func (i ImageBuildRequest) toBuild() *buildv1.Build { // ConfigMaps / Secrets / etc. wired into it. func (i ImageBuildRequest) toBuildPod() *corev1.Pod { env := []corev1.EnvVar{ + { + Name: "DIGEST_CONFIGMAP_NAME", + Value: i.getDigestConfigMapName(), + }, { Name: "HOME", Value: "/home/build", @@ -287,8 +290,10 @@ func (i ImageBuildRequest) toBuildPod() *corev1.Pod { } // TODO: We need pull creds with permissions to pull the base image. By - // default, none of the MCO pull secrets can directly pull it. I injected my - // own pull creds to do that. + // default, none of the MCO pull secrets can directly pull it. We can use the + // pull-secret creds from openshift-config to do that, though we'll need to + // mirror those creds into the MCO namespace. The operator portion of the MCO + // has some logic to detect whenever that secret changes. return &corev1.Pod{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", @@ -311,11 +316,14 @@ func (i ImageBuildRequest) toBuildPod() *corev1.Pod { }, { // This container waits for the aforementioned container to finish - // building so we can get the final image SHA. + // building so we can get the final image SHA. We do this by using + // the base OS image (which contains the "oc" binary) to create a + // ConfigMap from the digestfile that Buildah creates, which allows + // us to avoid parsing log files. 
Name: "wait-for-done", Env: env, Command: append(command, waitScript), - Image: skopeoImagePullspec, + Image: i.BaseImage.Pullspec, ImagePullPolicy: corev1.PullAlways, SecurityContext: securityContext, VolumeMounts: volumeMounts, @@ -421,3 +429,7 @@ func (i ImageBuildRequest) getMCConfigMapName() string { func (i ImageBuildRequest) getBuildName() string { return fmt.Sprintf("build-%s", i.Pool.Spec.Configuration.Name) } + +func (i ImageBuildRequest) getDigestConfigMapName() string { + return fmt.Sprintf("digest-%s", i.Pool.Spec.Configuration.Name) +} diff --git a/pkg/controller/build/pod_build_controller.go b/pkg/controller/build/pod_build_controller.go index 26fb853072..361c9d1955 100644 --- a/pkg/controller/build/pod_build_controller.go +++ b/pkg/controller/build/pod_build_controller.go @@ -12,6 +12,7 @@ import ( corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + aggerrors "k8s.io/apimachinery/pkg/util/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" coreclientsetv1 "k8s.io/client-go/kubernetes/typed/core/v1" @@ -185,50 +186,41 @@ func (ctrl *PodBuildController) Run(ctx context.Context, workers int) { <-ctx.Done() } -// Gets the final image pullspec by examining the logs of the wait-for-done -// container. This container runs `$ skopeo inspect -// docker://registry.hostname.com/org/repo:latest`, which dumps a JSON payload -// to stdout. This function snarfs that output into a JSON struct and does some -// munging to get the fully qualified image pullspec (i.e., -// registry.hostname.com/org/repo@sha256:...). +// Gets the final image pullspec by retrieving the ConfigMap that the build pod +// creates from the Buildah digestfile. func (ctrl *PodBuildController) FinalPullspec(pool *mcfgv1.MachineConfigPool) (string, error) { - ibr := newImageBuildRequest(pool) - - buildName := ibr.getBuildName() - - pod, err := ctrl.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Get(context.TODO(), buildName, metav1.GetOptions{}) + onClusterBuildConfigMap, err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), onClusterBuildConfigMapName, metav1.GetOptions{}) if err != nil { - return "", fmt.Errorf("could not get build pod %s for pool %s: %w", buildName, pool.Name, err) - } - - req := ctrl.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).GetLogs(pod.Name, &corev1.PodLogOptions{ - Container: "wait-for-done", - }) - - res := req.Do(context.TODO()) - if err := res.Error(); err != nil { return "", err } - out, err := res.Raw() + finalImageInfo := newFinalImageInfo(onClusterBuildConfigMap) + ibr := newImageBuildRequest(pool) + + digestConfigMap, err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), ibr.getDigestConfigMapName(), metav1.GetOptions{}) if err != nil { return "", err } - // Unfortunately, FakeClient has a "fake logs" value hardcoded within it. - // With that in mind, we need to short-circuit this. See: - // https://github.com/kubernetes/kubernetes/pull/91485. - if string(out) == "fake logs" { - return "fake@logs", nil - } - - return parseSkopeoOutputIntoImagePullspec(out) + return parseImagePullspec(finalImageInfo.Pullspec, digestConfigMap.Data["digest"]) } // Deletes the underlying build pod. 
 func (ctrl *PodBuildController) DeleteBuildObject(pool *mcfgv1.MachineConfigPool) error {
-	ibr := newImageBuildRequest(pool)
-	return ctrl.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Delete(context.TODO(), ibr.getBuildName(), metav1.DeleteOptions{})
+	// We want to ignore when a pod or ConfigMap is deleted if it is not found.
+	// This is because when a pool is opted out of layering *after* a successful
+	// build, no pod nor ConfigMap will remain. So we want to be able to
+	// idempotently call this function in that case.
+	return aggerrors.AggregateGoroutines(
+		func() error {
+			ibr := newImageBuildRequest(pool)
+			return ignoreIsNotFoundErr(ctrl.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Delete(context.TODO(), ibr.getBuildName(), metav1.DeleteOptions{}))
+		},
+		func() error {
+			ibr := newImageBuildRequest(pool)
+			return ignoreIsNotFoundErr(ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Delete(context.TODO(), ibr.getDigestConfigMapName(), metav1.DeleteOptions{}))
+		},
+	)
 }
 
 // Determines if a build is currently running by looking for a corresponding pod.

From 48ac23edf1ee43d5aec60d2eb1e216eb9dd45e2d Mon Sep 17 00:00:00 2001
From: Zack Zlotnik
Date: Mon, 3 Jul 2023 14:19:16 -0400
Subject: [PATCH 9/9] adds podman builder and clarifies buildah

---
 pkg/controller/build/assets/README.md         |   5 +
 .../assets/{build.sh => buildah-build.sh}     |   0
 pkg/controller/build/assets/podman-build.sh   |  35 ++++
 pkg/controller/build/build_controller.go      |   2 +-
 pkg/controller/build/image_build_request.go   | 190 +++++++++++++++++-
 pkg/controller/build/pod_build_controller.go  |  10 +-
 6 files changed, 235 insertions(+), 7 deletions(-)
 create mode 100644 pkg/controller/build/assets/README.md
 rename pkg/controller/build/assets/{build.sh => buildah-build.sh} (100%)
 create mode 100644 pkg/controller/build/assets/podman-build.sh

diff --git a/pkg/controller/build/assets/README.md b/pkg/controller/build/assets/README.md
new file mode 100644
index 0000000000..9b4c75a333
--- /dev/null
+++ b/pkg/controller/build/assets/README.md
@@ -0,0 +1,5 @@
+# assets
+
+These files get embedded within the Go binary and are not intended for direct
+use. In particular, the Dockerfile is interspersed with Go templates and will
+not build unless rendered with a tool such as [Gomplate](https://github.com/hairyhenderson/gomplate).
diff --git a/pkg/controller/build/assets/build.sh b/pkg/controller/build/assets/buildah-build.sh
similarity index 100%
rename from pkg/controller/build/assets/build.sh
rename to pkg/controller/build/assets/buildah-build.sh
diff --git a/pkg/controller/build/assets/podman-build.sh b/pkg/controller/build/assets/podman-build.sh
new file mode 100644
index 0000000000..882c4f5ef9
--- /dev/null
+++ b/pkg/controller/build/assets/podman-build.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+#
+# This script is not meant to be directly executed. Instead, it is embedded
+# within the Build Controller binary (see //go:embed) and injected into a
+# custom build pod.
+set -xeuo
+
+build_context="/tmp/context"
+
+# Create a directory to hold our build context.
+mkdir -p "$build_context/machineconfig"
+
+# Copy the Dockerfile and Machineconfigs from configmaps into our build context.
+cp /tmp/dockerfile/Dockerfile "$build_context"
+cp /tmp/machineconfig/machineconfig.json.gz "$build_context/machineconfig/"
+
+# Build our image using Podman.
+podman build \
+    --storage-driver vfs \
+    --authfile="$BASE_IMAGE_PULL_CREDS" \
+    --tag "$TAG" \
+    --file="$build_context/Dockerfile" "$build_context"
+
+# Push our built image.
+podman push \ + --storage-driver vfs \ + --authfile="$FINAL_IMAGE_PUSH_CREDS" \ + --digestfile="/tmp/digestfile" \ + --cert-dir /var/run/secrets/kubernetes.io/serviceaccount "$TAG" + +# Store the digestfile in a configmap for future retrieval. +oc create configmap \ + "$DIGEST_CONFIGMAP_NAME" \ + --namespace openshift-machine-config-operator \ + --from-file=digest=/tmp/digestfile diff --git a/pkg/controller/build/build_controller.go b/pkg/controller/build/build_controller.go index 4e587627c7..10343ed083 100644 --- a/pkg/controller/build/build_controller.go +++ b/pkg/controller/build/build_controller.go @@ -794,7 +794,7 @@ func (ctrl *Controller) prepareMachineConfigForPool(ibr ImageBuildRequest) error _, err = ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Create(context.TODO(), dockerfileConfigMap, metav1.CreateOptions{}) if err != nil { - return fmt.Errorf("could not load rendered MachineConfig %s into configmap: %w", dockerfileConfigMap.Name, err) + return fmt.Errorf("could not load rendered Dockerfile %s into configmap: %w", dockerfileConfigMap.Name, err) } klog.Infof("Stored Dockerfile for build %s in ConfigMap %s for build", ibr.getBuildName(), dockerfileConfigMap.Name) diff --git a/pkg/controller/build/image_build_request.go b/pkg/controller/build/image_build_request.go index 90fcce9167..801d8d15a7 100644 --- a/pkg/controller/build/image_build_request.go +++ b/pkg/controller/build/image_build_request.go @@ -10,6 +10,7 @@ import ( buildv1 "github.com/openshift/api/build/v1" mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + "github.com/openshift/machine-config-operator/test/helpers" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -26,8 +27,11 @@ var dockerfileTemplate string //go:embed assets/wait.sh var waitScript string -//go:embed assets/build.sh -var buildScript string +//go:embed assets/buildah-build.sh +var buildahBuildScript string + +//go:embed assets/podman-build.sh +var podmanBuildScript string // Represents a given image pullspec and the location of the pull secret. type ImageInfo struct { @@ -233,6 +237,185 @@ func (i ImageBuildRequest) toBuild() *buildv1.Build { // Creates a custom image build pod to build the final OS image with all // ConfigMaps / Secrets / etc. wired into it. func (i ImageBuildRequest) toBuildPod() *corev1.Pod { + return i.toBuildahPod() +} + +// This reflects an attempt to use Podman to perform the OS build. +// Unfortunately, it was difficult to get this to run unprivileged and I was +// not able to figure out a solution. Nevertheless, I will leave it here for +// posterity. 
+func (i ImageBuildRequest) toPodmanPod() *corev1.Pod {
+	env := []corev1.EnvVar{
+		{
+			Name:  "DIGEST_CONFIGMAP_NAME",
+			Value: i.getDigestConfigMapName(),
+		},
+		{
+			Name:  "HOME",
+			Value: "/tmp",
+		},
+		{
+			Name:  "TAG",
+			Value: i.FinalImage.Pullspec,
+		},
+		{
+			Name:  "BASE_IMAGE_PULL_CREDS",
+			Value: "/tmp/base-image-pull-creds/config.json",
+		},
+		{
+			Name:  "FINAL_IMAGE_PUSH_CREDS",
+			Value: "/tmp/final-image-push-creds/config.json",
+		},
+	}
+
+	command := []string{"/bin/bash", "-c"}
+
+	volumeMounts := []corev1.VolumeMount{
+		{
+			Name:      "machineconfig",
+			MountPath: "/tmp/machineconfig",
+		},
+		{
+			Name:      "dockerfile",
+			MountPath: "/tmp/dockerfile",
+		},
+		{
+			Name:      "base-image-pull-creds",
+			MountPath: "/tmp/base-image-pull-creds",
+		},
+		{
+			Name:      "final-image-push-creds",
+			MountPath: "/tmp/final-image-push-creds",
+		},
+		{
+			Name:      "done",
+			MountPath: "/tmp/done",
+		},
+	}
+
+	// See: https://access.redhat.com/solutions/6964609
+	// TL;DR: Trying to get podman / buildah to run in an unprivileged container
+	// is quite involved. However, OpenShift Builder runs in privileged
+	// containers, which sets a precedent.
+	// This requires that one run: $ oc adm policy add-scc-to-user -z machine-os-builder privileged
+	securityContext := &corev1.SecurityContext{
+		Privileged: helpers.BoolToPtr(true),
+		SeccompProfile: &corev1.SeccompProfile{
+			Type:             "Localhost",
+			LocalhostProfile: helpers.StrToPtr("profiles/unshare.json"),
+		},
+	}
+
+	// TODO: We need pull creds with permissions to pull the base image. By
+	// default, none of the MCO pull secrets can directly pull it. We can use the
+	// pull-secret creds from openshift-config to do that, though we'll need to
+	// mirror those creds into the MCO namespace. The operator portion of the MCO
+	// has some logic to detect whenever that secret changes.
+	return &corev1.Pod{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "v1",
+			Kind:       "Pod",
+		},
+		ObjectMeta: i.getObjectMeta(i.getBuildName()),
+		Spec: corev1.PodSpec{
+			RestartPolicy: corev1.RestartPolicyNever,
+			Containers: []corev1.Container{
+				{
+					// This container performs the image build / push process.
+					// Additionally, it takes the digestfile which podman creates, which
+					// contains the SHA256 from the container registry, and stores this
+					// in a ConfigMap which is consumed after the pod stops.
+					Name:            "image-build",
+					Image:           i.BaseImage.Pullspec,
+					Env:             env,
+					Command:         append(command, podmanBuildScript),
+					ImagePullPolicy: corev1.PullIfNotPresent,
+					SecurityContext: securityContext,
+					VolumeMounts:    volumeMounts,
+				},
+			},
+			// We probably cannot count on the 'builder' service account being
+			// present in the future. If we cannot use the builder service account,
+			// we'll need to:
+			// 1. Create a SecurityContextConstraint.
+			// 2. Do additional RBAC / ClusterRole / etc. work to suss this out.
+			ServiceAccountName: "machine-os-builder",
+			Volumes: []corev1.Volume{ // nolint:dupl // I don't want to deduplicate this yet since there are still some unknowns.
+				{
+					// Provides the rendered Dockerfile.
+					Name: "dockerfile",
+					VolumeSource: corev1.VolumeSource{
+						ConfigMap: &corev1.ConfigMapVolumeSource{
+							LocalObjectReference: corev1.LocalObjectReference{
+								Name: i.getDockerfileConfigMapName(),
+							},
+						},
+					},
+				},
+				{
+					// Provides the rendered MachineConfig in a gzipped / base64-encoded
+					// format.
+					Name: "machineconfig",
+					VolumeSource: corev1.VolumeSource{
+						ConfigMap: &corev1.ConfigMapVolumeSource{
+							LocalObjectReference: corev1.LocalObjectReference{
+								Name: i.getMCConfigMapName(),
+							},
+						},
+					},
+				},
+				{
+					// Provides the credentials needed to pull the base OS image.
+					Name: "base-image-pull-creds",
+					VolumeSource: corev1.VolumeSource{
+						Secret: &corev1.SecretVolumeSource{
+							SecretName: i.BaseImage.PullSecret.Name,
+							Items: []corev1.KeyToPath{
+								{
+									Key:  corev1.DockerConfigJsonKey,
+									Path: "config.json",
+								},
+							},
+						},
+					},
+				},
+				{
+					// Provides the credentials needed to push the final OS image.
+					Name: "final-image-push-creds",
+					VolumeSource: corev1.VolumeSource{
+						Secret: &corev1.SecretVolumeSource{
+							SecretName: i.FinalImage.PullSecret.Name,
+							Items: []corev1.KeyToPath{
+								{
+									Key:  corev1.DockerConfigJsonKey,
+									Path: "config.json",
+								},
+							},
+						},
+					},
+				},
+				{
+					// Provides a way for the "image-build" container to signal that it
+					// finished so that the "wait-for-done" container can retrieve the
+					// image SHA.
+					Name: "done",
+					VolumeSource: corev1.VolumeSource{
+						EmptyDir: &corev1.EmptyDirVolumeSource{
+							Medium: corev1.StorageMediumMemory,
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+// We're able to run the Buildah image in an unprivileged pod provided that the
+// machine-os-builder service account has the anyuid security context
+// constraint enabled to allow us to use UID 1000, which maps to the UID within
+// the official Buildah image.
+// nolint:dupl // I don't want to deduplicate this yet since there are still some unknowns.
+func (i ImageBuildRequest) toBuildahPod() *corev1.Pod {
 	env := []corev1.EnvVar{
 		{
 			Name: "DIGEST_CONFIGMAP_NAME",
@@ -309,7 +492,7 @@ func (i ImageBuildRequest) toBuildPod() *corev1.Pod {
 			// TODO: Figure out how to not hard-code this here.
 			Image:           buildahImagePullspec,
 			Env:             env,
-			Command:         append(command, buildScript),
+			Command:         append(command, buildahBuildScript),
 			ImagePullPolicy: corev1.PullAlways,
 			SecurityContext: securityContext,
 			VolumeMounts:    volumeMounts,
@@ -329,6 +512,7 @@ func (i ImageBuildRequest) toBuildPod() *corev1.Pod {
 				VolumeMounts:    volumeMounts,
 			},
 		},
+		ServiceAccountName: "machine-os-builder",
 		Volumes: []corev1.Volume{
 			{
 				// Provides the rendered Dockerfile.
diff --git a/pkg/controller/build/pod_build_controller.go b/pkg/controller/build/pod_build_controller.go
index 361c9d1955..adb7111d7b 100644
--- a/pkg/controller/build/pod_build_controller.go
+++ b/pkg/controller/build/pod_build_controller.go
@@ -288,14 +288,14 @@ func (ctrl *PodBuildController) updatePod(oldObj, curObj interface{}) {
 
 	isBuildPod := hasAllRequiredOSBuildLabels(curPod.Labels)
 
-	klog.Infof("Updating pod %s. Is build pod? %v", curPod.Name, isBuildPod)
-
 	// Ignore non-build pods.
 	// TODO: Figure out if we can add the filter criteria onto the lister.
 	if !isBuildPod {
 		return
 	}
 
+	klog.Infof("Updating pod %s. Is build pod? %v", curPod.Name, isBuildPod)
+
 	if oldPod.Status.Phase != curPod.Status.Phase {
 		klog.Infof("Pod %s changed from %s to %s", oldPod.Name, oldPod.Status.Phase, curPod.Status.Phase)
 	}
@@ -325,7 +325,11 @@ func (ctrl *PodBuildController) handleErr(err error, key interface{}) {
 
 // Fires whenever a pod is deleted.
 func (ctrl *PodBuildController) deletePod(obj interface{}) {
-	pod := obj.(*corev1.Pod).DeepCopy()
+	pod, ok := obj.(*corev1.Pod)
+	if !ok {
+		return
+	}
+	pod = pod.DeepCopy()
 	klog.V(4).Infof("Deleting Pod %s. Is build pod? %v", pod.Name, hasAllRequiredOSBuildLabels(pod.Labels))
 	ctrl.enqueuePod(pod)
 }