Set sync setting in config automatically #18

Merged
9 changes: 4 additions & 5 deletions .github/workflows/ci_tests.yaml
@@ -81,11 +81,10 @@ jobs:

- name: E2E Tests
run: |
- make deploy-ci NAMESPACE=mygatekeeper IMG=localhost:5000/gatekeeper-operator:$GITHUB_SHA
- kubectl -n mygatekeeper wait deployment/gatekeeper-operator-controller --for condition=Available --timeout=90s
- kubectl -n mygatekeeper logs deployment/gatekeeper-operator-controller -c manager -f > operator.log &
- make test-e2e NAMESPACE=mygatekeeper
- kubectl delete --wait namespace mygatekeeper
+ make deploy-ci NAMESPACE=gatekeeper-system IMG=localhost:5000/gatekeeper-operator:$GITHUB_SHA
+ kubectl -n gatekeeper-system wait deployment/gatekeeper-operator-controller --for condition=Available --timeout=90s
+ kubectl -n gatekeeper-system logs deployment/gatekeeper-operator-controller -c manager -f > operator.log &
+ make test-e2e NAMESPACE=gatekeeper-system

- name: Debug
if: ${{ failure() }}
2 changes: 2 additions & 0 deletions .gitignore
@@ -19,3 +19,5 @@ testbin/*
!vendor/**/zz_generated.*

ci-tools/
+
+ .vscode/*
13 changes: 10 additions & 3 deletions Makefile
@@ -146,11 +146,11 @@ tidy: ## Run go mod tidy

.PHONY: test
test: manifests generate fmt vet envtest ## Run tests.
- KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" GOFLAGS=$(GOFLAGS) go test ./... -coverprofile cover.out
+ KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" GOFLAGS=$(GOFLAGS) go test $(go list ./... | grep -v /test/) -coverprofile cover.out

.PHONY: test-e2e
test-e2e: e2e-dependencies generate fmt vet ## Run e2e tests, using the configured Kubernetes cluster in ~/.kube/config
- GOFLAGS=$(GOFLAGS) USE_EXISTING_CLUSTER=true $(GINKGO) -v --trace --fail-fast --label-filter="$(LABEL_FILTER)" ./test/e2e -- --namespace="$(NAMESPACE)" --timeout="5m" --delete-timeout="10m"
+ GOFLAGS=$(GOFLAGS) USE_EXISTING_CLUSTER=true $(GINKGO) -v --trace --fail-fast ./test/e2e -- --namespace="$(NAMESPACE)" --timeout="5m" --delete-timeout="10m"

.PHONY: test-cluster
test-cluster: ## Create a local kind cluster with a registry for testing
@@ -182,7 +182,14 @@ download-binaries: kustomize go-bindata envtest controller-gen
rm -rf bats-core-${BATS_VERSION} v${BATS_VERSION}.tar.gz; \
fi

- ##@ Build
+ .PHONY: kind-bootstrap-cluster
+ kind-bootstrap-cluster: test-cluster install dev-build
+ kubectl label ns $(NAMESPACE) --overwrite pod-security.kubernetes.io/audit=privileged
+ kubectl label ns $(NAMESPACE) --overwrite pod-security.kubernetes.io/enforce=privileged
+ kubectl label ns $(NAMESPACE) --overwrite pod-security.kubernetes.io/warn=privileged
+ kind load docker-image $(IMG)
+ $(MAKE) deploy-ci NAMESPACE=$(NAMESPACE) IMG=$(IMG)
+ kubectl -n $(NAMESPACE) wait deployment/gatekeeper-operator-controller --for condition=Available --timeout=90s

.PHONY: build
build: generate fmt vet ## Build manager binary.
9 changes: 6 additions & 3 deletions api/v1alpha1/gatekeeper_types.go
@@ -92,6 +92,8 @@ type AuditConfig struct {
// +optional
ConstraintViolationLimit *uint64 `json:"constraintViolationLimit,omitempty"`
// +optional
+ // Setting Automatic lets the Gatekeeper operator manage syncOnly in the config resource.
+ // It is not recommended to use Automatic when using referential constraints since those are not detected.
AuditFromCache *AuditFromCacheMode `json:"auditFromCache,omitempty"`
// +kubebuilder:validation:Minimum:=0
// +optional
@@ -140,12 +142,13 @@ const (
LogLevelError LogLevelMode = "ERROR"
)

- // +kubebuilder:validation:Enum:=Enabled;Disabled
+ // +kubebuilder:validation:Enum:=Enabled;Disabled;Automatic
type AuditFromCacheMode string

const (
- AuditFromCacheEnabled  AuditFromCacheMode = "Enabled"
- AuditFromCacheDisabled AuditFromCacheMode = "Disabled"
+ AuditFromCacheEnabled   AuditFromCacheMode = "Enabled"
+ AuditFromCacheDisabled  AuditFromCacheMode = "Disabled"
+ AuditFromCacheAutomatic AuditFromCacheMode = "Automatic"
)

// +kubebuilder:validation:Enum:=Enabled;Disabled
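Usage note (not part of this diff): with the new enum value, opting a cluster into automatic syncOnly management amounts to setting spec.audit.auditFromCache to Automatic on the Gatekeeper resource. The sketch below builds such an object with the typed API; it assumes the surrounding GatekeeperSpec exposes the audit block as `Audit *AuditConfig`, which the field path spec.audit.auditFromCache used elsewhere in this PR implies.

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	operatorv1alpha1 "github.com/gatekeeper/gatekeeper-operator/api/v1alpha1"
)

// automaticAuditGatekeeper returns a Gatekeeper resource that asks the operator
// to manage Config.spec.sync.syncOnly from the kinds referenced by constraints.
func automaticAuditGatekeeper() *operatorv1alpha1.Gatekeeper {
	mode := operatorv1alpha1.AuditFromCacheAutomatic

	return &operatorv1alpha1.Gatekeeper{
		// The controller added in this PR looks the resource up by the fixed name "gatekeeper".
		ObjectMeta: metav1.ObjectMeta{Name: "gatekeeper"},
		Spec: operatorv1alpha1.GatekeeperSpec{
			// Assumed wrapper field; the auditFromCache field itself is shown in AuditConfig above.
			Audit: &operatorv1alpha1.AuditConfig{
				AuditFromCache: &mode,
			},
		},
	}
}
```

As the new doc comment warns, Automatic only tracks kinds named in constraint match sections, so users relying on referential constraints should keep managing syncOnly explicitly.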
5 changes: 5 additions & 0 deletions bundle/manifests/operator.gatekeeper.sh_gatekeepers.yaml
@@ -864,9 +864,14 @@ spec:
minimum: 0
type: integer
auditFromCache:
+ description: Setting Automatic lets the Gatekeeper operator manage
+ syncOnly in the config resource. It is not recommended to use
+ Automatic when using referential constraints since those are
+ not detected.
enum:
- Enabled
- Disabled
+ - Automatic
type: string
auditInterval:
type: string
5 changes: 5 additions & 0 deletions config/crd/bases/operator.gatekeeper.sh_gatekeepers.yaml
@@ -864,9 +864,14 @@ spec:
minimum: 0
type: integer
auditFromCache:
+ description: Setting Automatic lets the Gatekeeper operator manage
+ syncOnly in the config resource. It is not recommended to use
+ Automatic when using referential constraints since those are
+ not detected.
enum:
- Enabled
- Disabled
+ - Automatic
type: string
auditInterval:
type: string
259 changes: 259 additions & 0 deletions controllers/constraintstatus_controller.go
@@ -0,0 +1,259 @@
package controllers

import (
"context"
"reflect"
"sort"
"time"

operatorv1alpha1 "github.com/gatekeeper/gatekeeper-operator/api/v1alpha1"
"github.com/go-logr/logr"
"github.com/open-policy-agent/gatekeeper/v3/apis/config/v1alpha1"
"github.com/open-policy-agent/gatekeeper/v3/apis/status/v1beta1"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/dynamic"
"k8s.io/utils/strings/slices"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

var ControllerName = "constraintstatus_reconciler"

type ConstraintPodStatusReconciler struct {
client.Client
Scheme *runtime.Scheme
Log logr.Logger
DynamicClient *dynamic.DynamicClient
Namespace string
// DiscoveryStorage caches the api-resources list and is used to look up missing resource versions.
DiscoveryStorage *DiscoveryStorage
// key = constraintPodName
ConstraintToSyncOnly map[string][]v1alpha1.SyncOnlyEntry
}

// SetupWithManager sets up the controller with the Manager.
func (r *ConstraintPodStatusReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
WithOptions(controller.Options{MaxConcurrentReconciles: int(1)}).
Named(ControllerName).
For(&v1beta1.ConstraintPodStatus{},
builder.WithPredicates(predicate.Funcs{
// Only reconcile the ConstraintPodStatus reported by the audit pod,
// because a constraint creates a ConstraintPodStatus for each Gatekeeper pod (4 by default)
CreateFunc: func(e event.CreateEvent) bool {
obj := e.Object.(*v1beta1.ConstraintPodStatus)

return slices.Contains(obj.Status.Operations, "audit")
},
UpdateFunc: func(e event.UpdateEvent) bool {
oldObj := e.ObjectOld.(*v1beta1.ConstraintPodStatus)
newObj := e.ObjectNew.(*v1beta1.ConstraintPodStatus)

return slices.Contains(newObj.Status.Operations, "audit") &&
// Update when the constraint is refreshed
oldObj.Status.ObservedGeneration != newObj.Status.ObservedGeneration
},
DeleteFunc: func(e event.DeleteEvent) bool {
obj := e.Object.(*v1beta1.ConstraintPodStatus)

return slices.Contains(obj.Status.Operations, "audit")
},
},
)).
Complete(r)
}

// When spec.audit.auditFromCache is set to Automatic,
// Reconcile analyzes the constraint associated with the ConstraintPodStatus reconcile request.
// The kinds used in the constraint's match configuration are used to configure the syncOnly option.
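// For example (illustrative, not taken from this diff): a constraint whose
// spec.match.kinds lists apiGroups [""] with kinds ["Pod"] and apiGroups ["apps"]
// with kinds ["Deployment"] would contribute syncOnly entries for
// {group: "", version: "v1", kind: "Pod"} and {group: "apps", version: "v1", kind: "Deployment"},
// with the concrete versions resolved through DiscoveryStorage.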
func (r *ConstraintPodStatusReconciler) Reconcile(ctx context.Context,
request reconcile.Request,
) (reconcile.Result, error) {
log := r.Log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
log.Info("Reconciling ConstraintPodStatus and Config")
// This is used for RequeueAfter
var requeueTime time.Duration

gatekeeper := &operatorv1alpha1.Gatekeeper{}
// Get gatekeeper resource
err := r.Get(ctx, types.NamespacedName{
Namespace: "",
Name: "gatekeeper",
}, gatekeeper)
if err != nil {
if apierrors.IsNotFound(err) {
log.Error(err, "Gatekeeper resource is not found")

return reconcile.Result{}, nil
}

return reconcile.Result{}, err
}

// Get the Config resource, or create it if it does not exist
config := &v1alpha1.Config{}
err = r.Get(ctx, types.NamespacedName{
Namespace: r.Namespace,
Name: "config",
}, config)

if err != nil {
if apierrors.IsNotFound(err) {
config = &v1alpha1.Config{
ObjectMeta: metav1.ObjectMeta{
Name: "config",
Namespace: r.Namespace,
},
}

createErr := r.Create(ctx, config)
if createErr != nil {
log.Error(err, "Fail to create the Gatekeeper Config object, will retry.")

return reconcile.Result{}, createErr
}

log.Info("The Gatekeeper Config object was created")
} else {
return reconcile.Result{}, err
}
}

constraintPodStatus := &v1beta1.ConstraintPodStatus{}

err = r.Get(ctx, request.NamespacedName, constraintPodStatus)
if err != nil {
if apierrors.IsNotFound(err) {
log.V(1).Info("Cannot find the ConstraintPodStatus")

err = r.handleDeleteEvent(ctx, request.Name, config)
if err != nil {
return reconcile.Result{}, err
}

return reconcile.Result{}, nil
}
// Requeue
return reconcile.Result{}, err
}

constraint, constraintName, err := getConstraint(ctx, *constraintPodStatus, r.DynamicClient)
if err != nil {
if apierrors.IsNotFound(err) {
r.Log.Info("The Constraint was not found", "constraintName:", constraintName)

return reconcile.Result{}, nil
}

return reconcile.Result{}, err
}

constraintMatchKinds, _, err := unstructured.NestedSlice(constraint.Object, "spec", "match", "kinds")
if err != nil {
r.Log.V(1).Info("There are no provided kinds in the Constraint", "constraintName:", constraintName)

err = r.handleDeleteEvent(ctx, request.Name, config)
if err != nil {
return reconcile.Result{}, err
}

return reconcile.Result{}, nil
}

constraintSyncOnlyEntries, err := r.DiscoveryStorage.getSyncOnlys(constraintMatchKinds)
if err != nil {
if errors.Is(err, ErrNotFoundDiscovery) {
r.Log.V(1).Info("Cannot find matched discovery. Requeue after 10 secs")

requeueTime = time.Second * 10
} else {
log.Error(err, "Error to get matching kind and apigroup")

return reconcile.Result{}, err
}
}

r.ConstraintToSyncOnly[request.Name] = constraintSyncOnlyEntries

uniqSyncOnly := r.getUniqSyncOnly()

if reflect.DeepEqual(uniqSyncOnly, config.Spec.Sync.SyncOnly) {
r.Log.V(1).Info("There are no changes detected. Cancel Updating")

return reconcile.Result{RequeueAfter: requeueTime}, nil
}

config.Spec.Sync.SyncOnly = uniqSyncOnly

err = r.Update(ctx, config, &client.UpdateOptions{})
if err != nil {
log.Error(err, "unable to update config syncOnly")

return reconcile.Result{}, err
}

return reconcile.Result{RequeueAfter: requeueTime}, nil
}

Review thread on the update error handling above:
mprahl: Could you please log this error?
mprahl (Nov 21, 2023): You can still update but have encountered a discovery error.
Suggested change: return reconcile.Result{}, err → return reconcile.Result{RequeueAfter: requeueTime}, nil
Author: return reconcile.Result{}, err

func (r *ConstraintPodStatusReconciler) getUniqSyncOnly() []v1alpha1.SyncOnlyEntry {
syncOnlySet := map[v1alpha1.SyncOnlyEntry]bool{}
// Add to table for unique filtering
for _, syncEntries := range r.ConstraintToSyncOnly {
for _, entry := range syncEntries {
syncOnlySet[entry] = true
}
}

syncOnlys := make([]v1alpha1.SyncOnlyEntry, 0, len(syncOnlySet))
for key := range syncOnlySet {
syncOnlys = append(syncOnlys, key)
}

// Sort syncOnly so the returned value is consistent each time the method is called.
sort.Slice(syncOnlys, func(i, j int) bool {
stringi := syncOnlys[i].Group + " " + syncOnlys[i].Kind + " " + syncOnlys[i].Version
stringj := syncOnlys[j].Group + " " + syncOnlys[j].Kind + " " + syncOnlys[j].Version

return stringi < stringj
})

return syncOnlys
}

// handleDeleteEvent is called when a ConstraintPodStatus object is deleted.
// It deletes ConstraintPodStatus' key in the `ConstraintToSyncOnly` map and
// recalculates the appropriate SyncOnly entries.
func (r *ConstraintPodStatusReconciler) handleDeleteEvent(
ctx context.Context, cpsName string, config *v1alpha1.Config,
) error {
delete(r.ConstraintToSyncOnly, cpsName)

updatedSyncOnly := r.getUniqSyncOnly()

if reflect.DeepEqual(updatedSyncOnly, config.Spec.Sync.SyncOnly) {
r.Log.V(1).Info("There are no changes detected. Will not update.")

return nil
}

config.Spec.Sync.SyncOnly = updatedSyncOnly

err := r.Update(ctx, config, &client.UpdateOptions{})
if err != nil {
r.Log.Error(err, "unable to update config syncOnly")

return err
}

return nil
}
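For orientation, here is a minimal, hypothetical sketch of how this reconciler could be wired into the operator's manager; it is not part of the diff. The DiscoveryStorage construction and the main.go changes live in other files of this PR, so the nil discoveryStorage value and the gatekeeper-system namespace below are placeholders, and the unconditional registration is a simplification.

```go
package main

import (
	"os"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/dynamic"
	ctrl "sigs.k8s.io/controller-runtime"

	operatorv1alpha1 "github.com/gatekeeper/gatekeeper-operator/api/v1alpha1"
	"github.com/gatekeeper/gatekeeper-operator/controllers"
	configv1alpha1 "github.com/open-policy-agent/gatekeeper/v3/apis/config/v1alpha1"
	statusv1beta1 "github.com/open-policy-agent/gatekeeper/v3/apis/status/v1beta1"
)

func main() {
	setupLog := ctrl.Log.WithName("setup")

	// Register the API types the reconciler reads (ConstraintPodStatus, Gatekeeper)
	// and writes (Config).
	scheme := runtime.NewScheme()
	_ = operatorv1alpha1.AddToScheme(scheme)
	_ = configv1alpha1.AddToScheme(scheme)
	_ = statusv1beta1.AddToScheme(scheme)

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
	if err != nil {
		setupLog.Error(err, "unable to create manager")
		os.Exit(1)
	}

	// The reconciler fetches constraints by GVR, so it needs a dynamic client.
	dynamicClient, err := dynamic.NewForConfig(mgr.GetConfig())
	if err != nil {
		setupLog.Error(err, "unable to create dynamic client")
		os.Exit(1)
	}

	// DiscoveryStorage is defined in another file of this PR; in the real operator it
	// would be constructed there and shared. Left nil here purely for illustration.
	var discoveryStorage *controllers.DiscoveryStorage

	reconciler := &controllers.ConstraintPodStatusReconciler{
		Client:               mgr.GetClient(),
		Scheme:               mgr.GetScheme(),
		Log:                  ctrl.Log.WithName(controllers.ControllerName),
		DynamicClient:        dynamicClient,
		Namespace:            "gatekeeper-system", // assumed operator/Gatekeeper namespace
		DiscoveryStorage:     discoveryStorage,
		ConstraintToSyncOnly: map[string][]configv1alpha1.SyncOnlyEntry{},
	}

	if err := reconciler.SetupWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to set up controller", "controller", controllers.ControllerName)
		os.Exit(1)
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		setupLog.Error(err, "manager exited with an error")
		os.Exit(1)
	}
}
```

Whether the real operator registers this controller only when auditFromCache is Automatic is not shown in this diff; the sketch registers it unconditionally to show how the struct fields fit together.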