Skip to content

Commit

Permalink
Add quickstart
Browse files Browse the repository at this point in the history
make quickstart will:
1. create a kind cluster as management cluster
2. deploy cluster-api
3. create a workload cluster powered by clusterAPI
4. deploy projectsveltos in the management cluster
  • Loading branch information
mgianluc committed Aug 16, 2023
1 parent 7db355d commit f678a51
Show file tree
Hide file tree
Showing 8 changed files with 767 additions and 75 deletions.
61 changes: 43 additions & 18 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ ARCH ?= amd64
OS ?= $(shell uname -s | tr A-Z a-z)
K8S_LATEST_VER ?= $(shell curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)
export CONTROLLER_IMG ?= $(REGISTRY)/$(IMAGE_NAME)
TAG ?= main
TAG ?= dev

# Get cluster-api version and build ldflags
clusterapi := $(shell go list -m sigs.k8s.io/cluster-api)
Expand Down Expand Up @@ -177,6 +177,27 @@ TIMEOUT ?= 10m
KIND_CLUSTER_YAML ?= test/$(WORKLOAD_CLUSTER_NAME).yaml
NUM_NODES ?= 5

.PHONY: quickstart
quickstart: ## start kind cluster; install all cluster api components; create a capi cluster; install projectsveltos
	$(MAKE) create-control-cluster

	# CAPD and the kubeadm control-plane controller must be up before a
	# workload cluster can be provisioned
	@echo wait for capd-system pod
	$(KUBECTL) wait --for=condition=Available deployment/capd-controller-manager -n capd-system --timeout=$(TIMEOUT)
	$(KUBECTL) wait --for=condition=Available deployment/capi-kubeadm-control-plane-controller-manager -n capi-kubeadm-control-plane-system --timeout=$(TIMEOUT)

	$(MAKE) create-workload-cluster

	# this is needed for projectsveltos metrics: the ServiceMonitor CRD must
	# exist before the projectsveltos manifests that reference it are applied
	$(KUBECTL) apply -f test/quickstart/servicemonitor_crd.yaml

	@echo "Start projectsveltos"
	$(KUBECTL) apply -f https://raw.githubusercontent.com/projectsveltos/sveltos/$(TAG)/manifest/manifest.yaml
	$(KUBECTL) apply -f https://raw.githubusercontent.com/projectsveltos/sveltos/$(TAG)/manifest/default-classifier.yaml
	# use $(TAG) for consistency with the two manifests above (was hard-coded to main)
	$(KUBECTL) apply -f https://raw.githubusercontent.com/projectsveltos/sveltos/$(TAG)/manifest/sveltosctl_manifest.yaml

	@echo "Waiting for projectsveltos addon-controller to be available..."
	$(KUBECTL) wait --for=condition=Available deployment/addon-controller -n projectsveltos --timeout=$(TIMEOUT)

.PHONY: test
test: | check-manifests generate fmt vet $(SETUP_ENVTEST) ## Run uts.
KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test $(shell go list ./... |grep -v test/fv |grep -v test/helpers) $(TEST_ARGS) -coverprofile cover.out
Expand All @@ -199,27 +220,11 @@ create-cluster: $(KIND) $(CLUSTERCTL) $(KUBECTL) $(ENVSUBST) ## Create a new kin
@echo "Start projectsveltos"
$(MAKE) deploy-projectsveltos

@echo "Create a workload cluster"
$(KUBECTL) apply -f $(KIND_CLUSTER_YAML)

@echo "wait for cluster to be provisioned"
$(KUBECTL) wait cluster $(WORKLOAD_CLUSTER_NAME) -n default --for=jsonpath='{.status.phase}'=Provisioned --timeout=$(TIMEOUT)
$(MAKE) create-workload-cluster

@echo "prepare configMap with kustomize files"
$(KUBECTL) create configmap kustomize --from-file=test/kustomize.tar.gz

@echo "sleep allowing control plane to be ready"
sleep 100

@echo "get kubeconfig to access workload cluster"
$(KIND) get kubeconfig --name $(WORKLOAD_CLUSTER_NAME) > test/fv/workload_kubeconfig

@echo "install calico on workload cluster"
$(KUBECTL) --kubeconfig=./test/fv/workload_kubeconfig apply -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/calico.yaml

@echo wait for calico pod
$(KUBECTL) --kubeconfig=./test/fv/workload_kubeconfig wait --for=condition=Available deployment/calico-kube-controllers -n kube-system --timeout=$(TIMEOUT)

@echo apply reloader CRD to managed cluster
$(KUBECTL) --kubeconfig=./test/fv/workload_kubeconfig apply -f https://raw.githubusercontent.com/projectsveltos/libsveltos/$(TAG)/config/crd/bases/lib.projectsveltos.io_reloaders.yaml

Expand All @@ -236,6 +241,7 @@ delete-cluster: $(KIND) ## Deletes the kind cluster $(CONTROL_CLUSTER_NAME)
#
# add this target. It needs to be run only when changing cluster-api version. create-cluster target uses the output of this command which is stored within repo
# It requires control cluster to exist. So first "make create-control-cluster" then run this target.
# Once generated, add the label "env: fv" to the Cluster resource
# Once generated, remove
# enforce: "{{ .podSecurityStandard.enforce }}"
# enforce-version: "latest"
Expand All @@ -250,6 +256,25 @@ create-control-cluster: $(KIND) $(CLUSTERCTL)
@echo "Create control cluster with docker as infrastructure provider"
CLUSTER_TOPOLOGY=true $(CLUSTERCTL) init --infrastructure docker

# Create a CAPI (docker-provider) workload cluster from $(KIND_CLUSTER_YAML),
# fetch its kubeconfig into test/fv/workload_kubeconfig, and install Calico CNI.
# Requires the management (control) cluster to already exist — see create-control-cluster.
.PHONY: create-workload-cluster
create-workload-cluster: $(KIND) $(KUBECTL)
	@echo "Create a workload cluster"
	$(KUBECTL) apply -f $(KIND_CLUSTER_YAML)

	@echo "wait for cluster to be provisioned"
	$(KUBECTL) wait cluster $(WORKLOAD_CLUSTER_NAME) -n default --for=jsonpath='{.status.phase}'=Provisioned --timeout=$(TIMEOUT)

	# Provisioned phase does not guarantee the workload API server is reachable
	# yet, hence the fixed sleep before fetching the kubeconfig
	@echo "sleep allowing control plane to be ready"
	sleep 100

	@echo "get kubeconfig to access workload cluster"
	$(KIND) get kubeconfig --name $(WORKLOAD_CLUSTER_NAME) > test/fv/workload_kubeconfig

	# nodes stay NotReady until a CNI is installed
	@echo "install calico on workload cluster"
	$(KUBECTL) --kubeconfig=./test/fv/workload_kubeconfig apply -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/calico.yaml

	@echo wait for calico pod
	$(KUBECTL) --kubeconfig=./test/fv/workload_kubeconfig wait --for=condition=Available deployment/calico-kube-controllers -n kube-system --timeout=$(TIMEOUT)

deploy-projectsveltos: $(KUSTOMIZE)
# Load projectsveltos image into cluster
@echo 'Load projectsveltos image into cluster'
Expand Down
3 changes: 2 additions & 1 deletion api/v1alpha1/clusterprofile_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -202,7 +202,8 @@ type PolicyRef struct {
// Namespace of the referenced resource.
// Namespace can be left empty. In such a case, namespace will
// be implicitly set to the cluster's namespace.
Namespace string `json:"namespace"`
// +optional
Namespace string `json:"namespace,omitempty"`

// Name of the referenced resource.
// +kubebuilder:validation:MinLength=1
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -243,7 +243,6 @@ spec:
required:
- kind
- name
- namespace
type: object
type: array
reloader:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -259,7 +259,6 @@ spec:
required:
- kind
- name
- namespace
type: object
type: array
reloader:
Expand Down
2 changes: 1 addition & 1 deletion config/default/manager_image_patch.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,5 +8,5 @@ spec:
spec:
containers:
# Change the value of image field below to your controller image URL
- image: projectsveltos/addon-controller-amd64:main
- image: projectsveltos/addon-controller-amd64:dev
name: controller
4 changes: 1 addition & 3 deletions manifest/manifest.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -453,7 +453,6 @@ spec:
required:
- kind
- name
- namespace
type: object
type: array
reloader:
Expand Down Expand Up @@ -1176,7 +1175,6 @@ spec:
required:
- kind
- name
- namespace
type: object
type: array
reloader:
Expand Down Expand Up @@ -1863,7 +1861,7 @@ spec:
- --v=5
command:
- /manager
image: projectsveltos/addon-controller-amd64:main
image: projectsveltos/addon-controller-amd64:dev
livenessProbe:
httpGet:
path: /healthz
Expand Down
56 changes: 6 additions & 50 deletions test/clusterapi-workload.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -34,43 +34,6 @@ spec:
description: Sets the imageRepository used for the KubeadmControlPlane.
enabledIf: '{{ ne .imageRepository "" }}'
name: imageRepository
- definitions:
- jsonPatches:
- op: add
path: /spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/kubeletExtraArgs/cgroup-driver
value: cgroupfs
- op: add
path: /spec/template/spec/kubeadmConfigSpec/joinConfiguration/nodeRegistration/kubeletExtraArgs/cgroup-driver
value: cgroupfs
selector:
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlaneTemplate
matchResources:
controlPlane: true
description: |
Sets the cgroupDriver to cgroupfs if a Kubernetes version < v1.24 is referenced.
This is required because kind and the node images do not support the default
systemd cgroupDriver for kubernetes < v1.24.
enabledIf: '{{ semverCompare "<= v1.23" .builtin.controlPlane.version }}'
name: cgroupDriver-controlPlane
- definitions:
- jsonPatches:
- op: add
path: /spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/cgroup-driver
value: cgroupfs
selector:
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
matchResources:
machineDeploymentClass:
names:
- default-worker
description: |
Sets the cgroupDriver to cgroupfs if a Kubernetes version < v1.24 is referenced.
This is required because kind and the node images do not support the default
systemd cgroupDriver for kubernetes < v1.24.
enabledIf: '{{ semverCompare "<= v1.23" .builtin.machineDeployment.version }}'
name: cgroupDriver-machineDeployment
- definitions:
- jsonPatches:
- op: add
Expand Down Expand Up @@ -268,15 +231,9 @@ spec:
extraArgs:
enable-hostpath-provisioner: "true"
initConfiguration:
nodeRegistration:
criSocket: unix:///var/run/containerd/containerd.sock
kubeletExtraArgs:
eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
nodeRegistration: {}
joinConfiguration:
nodeRegistration:
criSocket: unix:///var/run/containerd/containerd.sock
kubeletExtraArgs:
eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
nodeRegistration: {}
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: DockerMachineTemplate
Expand Down Expand Up @@ -311,16 +268,15 @@ spec:
template:
spec:
joinConfiguration:
nodeRegistration:
criSocket: unix:///var/run/containerd/containerd.sock
kubeletExtraArgs:
eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
nodeRegistration: {}
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
name: clusterapi-workload
namespace: default
labels:
env: fv
spec:
clusterNetwork:
pods:
Expand Down Expand Up @@ -348,7 +304,7 @@ spec:
enabled: true
enforce: baseline
warn: restricted
version: v1.26.0
version: v1.27.0
workers:
machineDeployments:
- class: default-worker
Expand Down
Loading

0 comments on commit f678a51

Please sign in to comment.